From 013c7d1f3a1334c218c816a269766b84692743ca Mon Sep 17 00:00:00 2001 From: eopXD Date: Thu, 26 Jan 2023 09:01:41 -0800 Subject: [PATCH] [2/3][Clang][RISCV] Add `__riscv_` for non-overloaded intrinsics This commit adds the `__riscv_` prefix for the non-overloaded RVV intrinsics. This is the 2nd commit of a patch-set to add __riscv_ for all RVV intrinsics. This follows the naming guideline under riscv-c-api-doc to add the `__riscv_` prefix for all RVV intrinsics. Pull Request: riscv-non-isa/riscv-c-api-doc#31 riscv-non-isa/rvv-intrinsic-doc#189 Depends on D142085. Reviewed By: kito-cheng Differential Revision: https://reviews.llvm.org/D142644 --- clang/lib/Support/RISCVVIntrinsicUtils.cpp | 4 + .../non-policy/non-overloaded/vaadd.c | 176 ++-- .../non-policy/non-overloaded/vaaddu.c | 176 ++-- .../non-policy/non-overloaded/vadc.c | 176 ++-- .../non-policy/non-overloaded/vadd.c | 352 +++---- .../non-policy/non-overloaded/vand.c | 352 +++---- .../non-policy/non-overloaded/vasub.c | 176 ++-- .../non-policy/non-overloaded/vasubu.c | 176 ++-- .../non-policy/non-overloaded/vcompress.c | 118 +-- .../non-policy/non-overloaded/vcpop.c | 28 +- .../non-policy/non-overloaded/vdiv.c | 176 ++-- .../non-policy/non-overloaded/vdivu.c | 176 ++-- .../non-policy/non-overloaded/vfabs.c | 60 +- .../non-policy/non-overloaded/vfadd.c | 120 +-- .../non-policy/non-overloaded/vfclass.c | 60 +- .../non-policy/non-overloaded/vfcvt.c | 360 +++---- .../non-policy/non-overloaded/vfdiv.c | 120 +-- .../non-policy/non-overloaded/vfirst.c | 28 +- .../non-policy/non-overloaded/vfmacc.c | 120 +-- .../non-policy/non-overloaded/vfmadd.c | 120 +-- .../non-policy/non-overloaded/vfmax.c | 120 +-- .../non-policy/non-overloaded/vfmerge.c | 30 +- .../non-policy/non-overloaded/vfmin.c | 120 +-- .../non-policy/non-overloaded/vfmsac.c | 120 +-- .../non-policy/non-overloaded/vfmsub.c | 120 +-- .../non-policy/non-overloaded/vfmul.c | 120 +-- .../non-policy/non-overloaded/vfmv.c | 90 +- .../non-policy/non-overloaded/vfncvt.c | 384 
+++---- .../non-policy/non-overloaded/vfneg.c | 60 +- .../non-policy/non-overloaded/vfnmacc.c | 120 +-- .../non-policy/non-overloaded/vfnmadd.c | 120 +-- .../non-policy/non-overloaded/vfnmsac.c | 120 +-- .../non-policy/non-overloaded/vfnmsub.c | 120 +-- .../non-policy/non-overloaded/vfrdiv.c | 60 +- .../non-policy/non-overloaded/vfrec7.c | 60 +- .../non-policy/non-overloaded/vfredmax.c | 60 +- .../non-policy/non-overloaded/vfredmin.c | 60 +- .../non-policy/non-overloaded/vfredosum.c | 60 +- .../non-policy/non-overloaded/vfredusum.c | 60 +- .../non-policy/non-overloaded/vfrsqrt7.c | 60 +- .../non-policy/non-overloaded/vfrsub.c | 60 +- .../non-policy/non-overloaded/vfsgnj.c | 120 +-- .../non-policy/non-overloaded/vfsgnjn.c | 120 +-- .../non-policy/non-overloaded/vfsgnjx.c | 120 +-- .../non-policy/non-overloaded/vfslide1down.c | 60 +- .../non-policy/non-overloaded/vfslide1up.c | 60 +- .../non-policy/non-overloaded/vfsqrt.c | 60 +- .../non-policy/non-overloaded/vfsub.c | 120 +-- .../non-policy/non-overloaded/vfwadd.c | 144 +-- .../non-policy/non-overloaded/vfwcvt.c | 300 +++--- .../non-policy/non-overloaded/vfwmacc.c | 72 +- .../non-policy/non-overloaded/vfwmsac.c | 72 +- .../non-policy/non-overloaded/vfwmul.c | 72 +- .../non-policy/non-overloaded/vfwnmacc.c | 72 +- .../non-policy/non-overloaded/vfwnmsac.c | 72 +- .../non-policy/non-overloaded/vfwredosum.c | 44 +- .../non-policy/non-overloaded/vfwredusum.c | 44 +- .../non-policy/non-overloaded/vfwsub.c | 144 +-- .../non-policy/non-overloaded/vget.c | 132 +-- .../non-policy/non-overloaded/vid.c | 88 +- .../non-policy/non-overloaded/viota.c | 88 +- .../non-policy/non-overloaded/vle16.c | 72 +- .../non-policy/non-overloaded/vle16ff.c | 72 +- .../non-policy/non-overloaded/vle32.c | 60 +- .../non-policy/non-overloaded/vle32ff.c | 60 +- .../non-policy/non-overloaded/vle64.c | 48 +- .../non-policy/non-overloaded/vle64ff.c | 48 +- .../non-policy/non-overloaded/vle8.c | 56 +- .../non-policy/non-overloaded/vle8ff.c | 56 +- 
.../non-policy/non-overloaded/vlm.c | 14 +- .../non-policy/non-overloaded/vlmul.c | 540 +++++----- .../non-policy/non-overloaded/vloxei16.c | 228 ++--- .../non-policy/non-overloaded/vloxei32.c | 208 ++-- .../non-policy/non-overloaded/vloxei64.c | 176 ++-- .../non-policy/non-overloaded/vloxei8.c | 236 ++--- .../non-policy/non-overloaded/vloxseg2ei16.c | 192 ++-- .../non-policy/non-overloaded/vloxseg2ei32.c | 184 ++-- .../non-policy/non-overloaded/vloxseg2ei64.c | 164 +-- .../non-policy/non-overloaded/vloxseg2ei8.c | 192 ++-- .../non-policy/non-overloaded/vloxseg3ei16.c | 148 +-- .../non-policy/non-overloaded/vloxseg3ei32.c | 148 +-- .../non-policy/non-overloaded/vloxseg3ei64.c | 140 +-- .../non-policy/non-overloaded/vloxseg3ei8.c | 148 +-- .../non-policy/non-overloaded/vloxseg4ei16.c | 148 +-- .../non-policy/non-overloaded/vloxseg4ei32.c | 148 +-- .../non-policy/non-overloaded/vloxseg4ei64.c | 140 +-- .../non-policy/non-overloaded/vloxseg4ei8.c | 148 +-- .../non-policy/non-overloaded/vloxseg5ei16.c | 104 +- .../non-policy/non-overloaded/vloxseg5ei32.c | 104 +- .../non-policy/non-overloaded/vloxseg5ei64.c | 104 +- .../non-policy/non-overloaded/vloxseg5ei8.c | 104 +- .../non-policy/non-overloaded/vloxseg6ei16.c | 104 +- .../non-policy/non-overloaded/vloxseg6ei32.c | 104 +- .../non-policy/non-overloaded/vloxseg6ei64.c | 104 +- .../non-policy/non-overloaded/vloxseg6ei8.c | 104 +- .../non-policy/non-overloaded/vloxseg7ei16.c | 104 +- .../non-policy/non-overloaded/vloxseg7ei32.c | 104 +- .../non-policy/non-overloaded/vloxseg7ei64.c | 104 +- .../non-policy/non-overloaded/vloxseg7ei8.c | 104 +- .../non-policy/non-overloaded/vloxseg8ei16.c | 104 +- .../non-policy/non-overloaded/vloxseg8ei32.c | 104 +- .../non-policy/non-overloaded/vloxseg8ei64.c | 104 +- .../non-policy/non-overloaded/vloxseg8ei8.c | 104 +- .../non-policy/non-overloaded/vlse16.c | 72 +- .../non-policy/non-overloaded/vlse32.c | 60 +- .../non-policy/non-overloaded/vlse64.c | 48 +- 
.../non-policy/non-overloaded/vlse8.c | 56 +- .../non-policy/non-overloaded/vlseg2e16.c | 60 +- .../non-policy/non-overloaded/vlseg2e16ff.c | 60 +- .../non-policy/non-overloaded/vlseg2e32.c | 48 +- .../non-policy/non-overloaded/vlseg2e32ff.c | 48 +- .../non-policy/non-overloaded/vlseg2e64.c | 36 +- .../non-policy/non-overloaded/vlseg2e64ff.c | 36 +- .../non-policy/non-overloaded/vlseg2e8.c | 48 +- .../non-policy/non-overloaded/vlseg2e8ff.c | 48 +- .../non-policy/non-overloaded/vlseg3e16.c | 48 +- .../non-policy/non-overloaded/vlseg3e16ff.c | 48 +- .../non-policy/non-overloaded/vlseg3e32.c | 36 +- .../non-policy/non-overloaded/vlseg3e32ff.c | 36 +- .../non-policy/non-overloaded/vlseg3e64.c | 24 +- .../non-policy/non-overloaded/vlseg3e64ff.c | 24 +- .../non-policy/non-overloaded/vlseg3e8.c | 40 +- .../non-policy/non-overloaded/vlseg3e8ff.c | 40 +- .../non-policy/non-overloaded/vlseg4e16.c | 48 +- .../non-policy/non-overloaded/vlseg4e16ff.c | 48 +- .../non-policy/non-overloaded/vlseg4e32.c | 36 +- .../non-policy/non-overloaded/vlseg4e32ff.c | 36 +- .../non-policy/non-overloaded/vlseg4e64.c | 24 +- .../non-policy/non-overloaded/vlseg4e64ff.c | 24 +- .../non-policy/non-overloaded/vlseg4e8.c | 40 +- .../non-policy/non-overloaded/vlseg4e8ff.c | 40 +- .../non-policy/non-overloaded/vlseg5e16.c | 36 +- .../non-policy/non-overloaded/vlseg5e16ff.c | 36 +- .../non-policy/non-overloaded/vlseg5e32.c | 24 +- .../non-policy/non-overloaded/vlseg5e32ff.c | 24 +- .../non-policy/non-overloaded/vlseg5e64.c | 12 +- .../non-policy/non-overloaded/vlseg5e64ff.c | 12 +- .../non-policy/non-overloaded/vlseg5e8.c | 32 +- .../non-policy/non-overloaded/vlseg5e8ff.c | 32 +- .../non-policy/non-overloaded/vlseg6e16.c | 36 +- .../non-policy/non-overloaded/vlseg6e16ff.c | 36 +- .../non-policy/non-overloaded/vlseg6e32.c | 24 +- .../non-policy/non-overloaded/vlseg6e32ff.c | 24 +- .../non-policy/non-overloaded/vlseg6e64.c | 12 +- .../non-policy/non-overloaded/vlseg6e64ff.c | 12 +- 
.../non-policy/non-overloaded/vlseg6e8.c | 32 +- .../non-policy/non-overloaded/vlseg6e8ff.c | 32 +- .../non-policy/non-overloaded/vlseg7e16.c | 36 +- .../non-policy/non-overloaded/vlseg7e16ff.c | 36 +- .../non-policy/non-overloaded/vlseg7e32.c | 24 +- .../non-policy/non-overloaded/vlseg7e32ff.c | 24 +- .../non-policy/non-overloaded/vlseg7e64.c | 12 +- .../non-policy/non-overloaded/vlseg7e64ff.c | 12 +- .../non-policy/non-overloaded/vlseg7e8.c | 32 +- .../non-policy/non-overloaded/vlseg7e8ff.c | 32 +- .../non-policy/non-overloaded/vlseg8e16.c | 36 +- .../non-policy/non-overloaded/vlseg8e16ff.c | 36 +- .../non-policy/non-overloaded/vlseg8e32.c | 24 +- .../non-policy/non-overloaded/vlseg8e32ff.c | 24 +- .../non-policy/non-overloaded/vlseg8e64.c | 12 +- .../non-policy/non-overloaded/vlseg8e64ff.c | 12 +- .../non-policy/non-overloaded/vlseg8e8.c | 32 +- .../non-policy/non-overloaded/vlseg8e8ff.c | 32 +- .../non-policy/non-overloaded/vlsseg2e16.c | 60 +- .../non-policy/non-overloaded/vlsseg2e32.c | 48 +- .../non-policy/non-overloaded/vlsseg2e64.c | 36 +- .../non-policy/non-overloaded/vlsseg2e8.c | 48 +- .../non-policy/non-overloaded/vlsseg3e16.c | 48 +- .../non-policy/non-overloaded/vlsseg3e32.c | 36 +- .../non-policy/non-overloaded/vlsseg3e64.c | 24 +- .../non-policy/non-overloaded/vlsseg3e8.c | 40 +- .../non-policy/non-overloaded/vlsseg4e16.c | 48 +- .../non-policy/non-overloaded/vlsseg4e32.c | 36 +- .../non-policy/non-overloaded/vlsseg4e64.c | 24 +- .../non-policy/non-overloaded/vlsseg4e8.c | 40 +- .../non-policy/non-overloaded/vlsseg5e16.c | 36 +- .../non-policy/non-overloaded/vlsseg5e32.c | 24 +- .../non-policy/non-overloaded/vlsseg5e64.c | 12 +- .../non-policy/non-overloaded/vlsseg5e8.c | 32 +- .../non-policy/non-overloaded/vlsseg6e16.c | 36 +- .../non-policy/non-overloaded/vlsseg6e32.c | 24 +- .../non-policy/non-overloaded/vlsseg6e64.c | 12 +- .../non-policy/non-overloaded/vlsseg6e8.c | 32 +- .../non-policy/non-overloaded/vlsseg7e16.c | 36 +- 
.../non-policy/non-overloaded/vlsseg7e32.c | 24 +- .../non-policy/non-overloaded/vlsseg7e64.c | 12 +- .../non-policy/non-overloaded/vlsseg7e8.c | 32 +- .../non-policy/non-overloaded/vlsseg8e16.c | 36 +- .../non-policy/non-overloaded/vlsseg8e32.c | 24 +- .../non-policy/non-overloaded/vlsseg8e64.c | 12 +- .../non-policy/non-overloaded/vlsseg8e8.c | 32 +- .../non-policy/non-overloaded/vluxei16.c | 228 ++--- .../non-policy/non-overloaded/vluxei32.c | 208 ++-- .../non-policy/non-overloaded/vluxei64.c | 176 ++-- .../non-policy/non-overloaded/vluxei8.c | 236 ++--- .../non-policy/non-overloaded/vluxseg2ei16.c | 192 ++-- .../non-policy/non-overloaded/vluxseg2ei32.c | 184 ++-- .../non-policy/non-overloaded/vluxseg2ei64.c | 164 +-- .../non-policy/non-overloaded/vluxseg2ei8.c | 192 ++-- .../non-policy/non-overloaded/vluxseg3ei16.c | 148 +-- .../non-policy/non-overloaded/vluxseg3ei32.c | 148 +-- .../non-policy/non-overloaded/vluxseg3ei64.c | 140 +-- .../non-policy/non-overloaded/vluxseg3ei8.c | 148 +-- .../non-policy/non-overloaded/vluxseg4ei16.c | 148 +-- .../non-policy/non-overloaded/vluxseg4ei32.c | 148 +-- .../non-policy/non-overloaded/vluxseg4ei64.c | 140 +-- .../non-policy/non-overloaded/vluxseg4ei8.c | 148 +-- .../non-policy/non-overloaded/vluxseg5ei16.c | 104 +- .../non-policy/non-overloaded/vluxseg5ei32.c | 104 +- .../non-policy/non-overloaded/vluxseg5ei64.c | 104 +- .../non-policy/non-overloaded/vluxseg5ei8.c | 104 +- .../non-policy/non-overloaded/vluxseg6ei16.c | 104 +- .../non-policy/non-overloaded/vluxseg6ei32.c | 104 +- .../non-policy/non-overloaded/vluxseg6ei64.c | 104 +- .../non-policy/non-overloaded/vluxseg6ei8.c | 104 +- .../non-policy/non-overloaded/vluxseg7ei16.c | 104 +- .../non-policy/non-overloaded/vluxseg7ei32.c | 104 +- .../non-policy/non-overloaded/vluxseg7ei64.c | 104 +- .../non-policy/non-overloaded/vluxseg7ei8.c | 104 +- .../non-policy/non-overloaded/vluxseg8ei16.c | 104 +- .../non-policy/non-overloaded/vluxseg8ei32.c | 104 +- 
.../non-policy/non-overloaded/vluxseg8ei64.c | 104 +- .../non-policy/non-overloaded/vluxseg8ei8.c | 104 +- .../non-policy/non-overloaded/vmacc.c | 352 +++---- .../non-policy/non-overloaded/vmadc.c | 352 +++---- .../non-policy/non-overloaded/vmadd.c | 352 +++---- .../non-policy/non-overloaded/vmand.c | 14 +- .../non-policy/non-overloaded/vmandn.c | 14 +- .../non-policy/non-overloaded/vmax.c | 176 ++-- .../non-policy/non-overloaded/vmaxu.c | 176 ++-- .../non-policy/non-overloaded/vmclr.c | 14 +- .../non-policy/non-overloaded/vmerge.c | 206 ++-- .../non-policy/non-overloaded/vmfeq.c | 120 +-- .../non-policy/non-overloaded/vmfge.c | 120 +-- .../non-policy/non-overloaded/vmfgt.c | 120 +-- .../non-policy/non-overloaded/vmfle.c | 120 +-- .../non-policy/non-overloaded/vmflt.c | 120 +-- .../non-policy/non-overloaded/vmfne.c | 120 +-- .../non-policy/non-overloaded/vmin.c | 176 ++-- .../non-policy/non-overloaded/vminu.c | 176 ++-- .../non-policy/non-overloaded/vmmv.c | 14 +- .../non-policy/non-overloaded/vmnand.c | 14 +- .../non-policy/non-overloaded/vmnor.c | 14 +- .../non-policy/non-overloaded/vmnot.c | 14 +- .../non-policy/non-overloaded/vmor.c | 14 +- .../non-policy/non-overloaded/vmorn.c | 14 +- .../non-policy/non-overloaded/vmsbc.c | 352 +++---- .../non-policy/non-overloaded/vmsbf.c | 28 +- .../non-policy/non-overloaded/vmseq.c | 352 +++---- .../non-policy/non-overloaded/vmset.c | 14 +- .../non-policy/non-overloaded/vmsge.c | 176 ++-- .../non-policy/non-overloaded/vmsgeu.c | 176 ++-- .../non-policy/non-overloaded/vmsgt.c | 176 ++-- .../non-policy/non-overloaded/vmsgtu.c | 176 ++-- .../non-policy/non-overloaded/vmsif.c | 28 +- .../non-policy/non-overloaded/vmsle.c | 176 ++-- .../non-policy/non-overloaded/vmsleu.c | 176 ++-- .../non-policy/non-overloaded/vmslt.c | 176 ++-- .../non-policy/non-overloaded/vmsltu.c | 176 ++-- .../non-policy/non-overloaded/vmsne.c | 352 +++---- .../non-policy/non-overloaded/vmsof.c | 28 +- .../non-policy/non-overloaded/vmul.c | 352 +++---- 
.../non-policy/non-overloaded/vmulh.c | 176 ++-- .../non-policy/non-overloaded/vmulhsu.c | 176 ++-- .../non-policy/non-overloaded/vmulhu.c | 176 ++-- .../non-policy/non-overloaded/vmv.c | 382 +++---- .../non-policy/non-overloaded/vmxnor.c | 14 +- .../non-policy/non-overloaded/vmxor.c | 14 +- .../non-policy/non-overloaded/vnclip.c | 120 +-- .../non-policy/non-overloaded/vnclipu.c | 120 +-- .../non-policy/non-overloaded/vncvt.c | 120 +-- .../non-policy/non-overloaded/vneg.c | 88 +- .../non-policy/non-overloaded/vnmsac.c | 352 +++---- .../non-policy/non-overloaded/vnmsub.c | 352 +++---- .../non-policy/non-overloaded/vnot.c | 176 ++-- .../non-policy/non-overloaded/vnsra.c | 120 +-- .../non-policy/non-overloaded/vnsrl.c | 120 +-- .../non-policy/non-overloaded/vor.c | 352 +++---- .../non-policy/non-overloaded/vredand.c | 176 ++-- .../non-policy/non-overloaded/vredmax.c | 88 +- .../non-policy/non-overloaded/vredmaxu.c | 88 +- .../non-policy/non-overloaded/vredmin.c | 88 +- .../non-policy/non-overloaded/vredminu.c | 88 +- .../non-policy/non-overloaded/vredor.c | 176 ++-- .../non-policy/non-overloaded/vredsum.c | 176 ++-- .../non-policy/non-overloaded/vredxor.c | 176 ++-- .../non-policy/non-overloaded/vreinterpret.c | 432 ++++---- .../non-policy/non-overloaded/vrem.c | 176 ++-- .../non-policy/non-overloaded/vremu.c | 176 ++-- .../non-policy/non-overloaded/vrgather.c | 472 ++++----- .../non-policy/non-overloaded/vrgatherei16.c | 228 ++--- .../non-policy/non-overloaded/vrsub.c | 176 ++-- .../non-policy/non-overloaded/vsadd.c | 176 ++-- .../non-policy/non-overloaded/vsaddu.c | 176 ++-- .../non-policy/non-overloaded/vsbc.c | 176 ++-- .../non-policy/non-overloaded/vse16.c | 72 +- .../non-policy/non-overloaded/vse32.c | 60 +- .../non-policy/non-overloaded/vse64.c | 48 +- .../non-policy/non-overloaded/vse8.c | 56 +- .../non-policy/non-overloaded/vset.c | 132 +-- .../non-policy/non-overloaded/vsext.c | 112 +-- .../non-policy/non-overloaded/vslide1down.c | 176 ++-- 
.../non-policy/non-overloaded/vslide1up.c | 176 ++-- .../non-policy/non-overloaded/vslidedown.c | 236 ++--- .../non-policy/non-overloaded/vslideup.c | 236 ++--- .../non-policy/non-overloaded/vsll.c | 352 +++---- .../non-policy/non-overloaded/vsm.c | 14 +- .../non-policy/non-overloaded/vsmul.c | 176 ++-- .../non-policy/non-overloaded/vsoxei16.c | 228 ++--- .../non-policy/non-overloaded/vsoxei32.c | 208 ++-- .../non-policy/non-overloaded/vsoxei64.c | 176 ++-- .../non-policy/non-overloaded/vsoxei8.c | 236 ++--- .../non-policy/non-overloaded/vsoxseg2ei16.c | 192 ++-- .../non-policy/non-overloaded/vsoxseg2ei32.c | 184 ++-- .../non-policy/non-overloaded/vsoxseg2ei64.c | 164 +-- .../non-policy/non-overloaded/vsoxseg2ei8.c | 192 ++-- .../non-policy/non-overloaded/vsoxseg3ei16.c | 148 +-- .../non-policy/non-overloaded/vsoxseg3ei32.c | 148 +-- .../non-policy/non-overloaded/vsoxseg3ei64.c | 140 +-- .../non-policy/non-overloaded/vsoxseg3ei8.c | 148 +-- .../non-policy/non-overloaded/vsoxseg4ei16.c | 148 +-- .../non-policy/non-overloaded/vsoxseg4ei32.c | 148 +-- .../non-policy/non-overloaded/vsoxseg4ei64.c | 140 +-- .../non-policy/non-overloaded/vsoxseg4ei8.c | 148 +-- .../non-policy/non-overloaded/vsoxseg5ei16.c | 104 +- .../non-policy/non-overloaded/vsoxseg5ei32.c | 104 +- .../non-policy/non-overloaded/vsoxseg5ei64.c | 104 +- .../non-policy/non-overloaded/vsoxseg5ei8.c | 104 +- .../non-policy/non-overloaded/vsoxseg6ei16.c | 104 +- .../non-policy/non-overloaded/vsoxseg6ei32.c | 104 +- .../non-policy/non-overloaded/vsoxseg6ei64.c | 104 +- .../non-policy/non-overloaded/vsoxseg6ei8.c | 104 +- .../non-policy/non-overloaded/vsoxseg7ei16.c | 104 +- .../non-policy/non-overloaded/vsoxseg7ei32.c | 104 +- .../non-policy/non-overloaded/vsoxseg7ei64.c | 104 +- .../non-policy/non-overloaded/vsoxseg7ei8.c | 104 +- .../non-policy/non-overloaded/vsoxseg8ei16.c | 104 +- .../non-policy/non-overloaded/vsoxseg8ei32.c | 104 +- .../non-policy/non-overloaded/vsoxseg8ei64.c | 104 +- 
.../non-policy/non-overloaded/vsoxseg8ei8.c | 104 +- .../non-policy/non-overloaded/vsra.c | 176 ++-- .../non-policy/non-overloaded/vsrl.c | 176 ++-- .../non-policy/non-overloaded/vsse16.c | 72 +- .../non-policy/non-overloaded/vsse32.c | 60 +- .../non-policy/non-overloaded/vsse64.c | 48 +- .../non-policy/non-overloaded/vsse8.c | 56 +- .../non-policy/non-overloaded/vsseg2e16.c | 60 +- .../non-policy/non-overloaded/vsseg2e32.c | 48 +- .../non-policy/non-overloaded/vsseg2e64.c | 36 +- .../non-policy/non-overloaded/vsseg2e8.c | 48 +- .../non-policy/non-overloaded/vsseg3e16.c | 48 +- .../non-policy/non-overloaded/vsseg3e32.c | 36 +- .../non-policy/non-overloaded/vsseg3e64.c | 24 +- .../non-policy/non-overloaded/vsseg3e8.c | 40 +- .../non-policy/non-overloaded/vsseg4e16.c | 48 +- .../non-policy/non-overloaded/vsseg4e32.c | 36 +- .../non-policy/non-overloaded/vsseg4e64.c | 24 +- .../non-policy/non-overloaded/vsseg4e8.c | 40 +- .../non-policy/non-overloaded/vsseg5e16.c | 36 +- .../non-policy/non-overloaded/vsseg5e32.c | 24 +- .../non-policy/non-overloaded/vsseg5e64.c | 12 +- .../non-policy/non-overloaded/vsseg5e8.c | 32 +- .../non-policy/non-overloaded/vsseg6e16.c | 36 +- .../non-policy/non-overloaded/vsseg6e32.c | 24 +- .../non-policy/non-overloaded/vsseg6e64.c | 12 +- .../non-policy/non-overloaded/vsseg6e8.c | 32 +- .../non-policy/non-overloaded/vsseg7e16.c | 36 +- .../non-policy/non-overloaded/vsseg7e32.c | 24 +- .../non-policy/non-overloaded/vsseg7e64.c | 12 +- .../non-policy/non-overloaded/vsseg7e8.c | 32 +- .../non-policy/non-overloaded/vsseg8e16.c | 36 +- .../non-policy/non-overloaded/vsseg8e32.c | 24 +- .../non-policy/non-overloaded/vsseg8e64.c | 12 +- .../non-policy/non-overloaded/vsseg8e8.c | 32 +- .../non-policy/non-overloaded/vssra.c | 176 ++-- .../non-policy/non-overloaded/vssrl.c | 176 ++-- .../non-policy/non-overloaded/vssseg2e16.c | 60 +- .../non-policy/non-overloaded/vssseg2e32.c | 48 +- .../non-policy/non-overloaded/vssseg2e64.c | 36 +- 
.../non-policy/non-overloaded/vssseg2e8.c | 48 +- .../non-policy/non-overloaded/vssseg3e16.c | 48 +- .../non-policy/non-overloaded/vssseg3e32.c | 36 +- .../non-policy/non-overloaded/vssseg3e64.c | 24 +- .../non-policy/non-overloaded/vssseg3e8.c | 40 +- .../non-policy/non-overloaded/vssseg4e16.c | 48 +- .../non-policy/non-overloaded/vssseg4e32.c | 36 +- .../non-policy/non-overloaded/vssseg4e64.c | 24 +- .../non-policy/non-overloaded/vssseg4e8.c | 40 +- .../non-policy/non-overloaded/vssseg5e16.c | 36 +- .../non-policy/non-overloaded/vssseg5e32.c | 24 +- .../non-policy/non-overloaded/vssseg5e64.c | 12 +- .../non-policy/non-overloaded/vssseg5e8.c | 32 +- .../non-policy/non-overloaded/vssseg6e16.c | 36 +- .../non-policy/non-overloaded/vssseg6e32.c | 24 +- .../non-policy/non-overloaded/vssseg6e64.c | 12 +- .../non-policy/non-overloaded/vssseg6e8.c | 32 +- .../non-policy/non-overloaded/vssseg7e16.c | 36 +- .../non-policy/non-overloaded/vssseg7e32.c | 24 +- .../non-policy/non-overloaded/vssseg7e64.c | 12 +- .../non-policy/non-overloaded/vssseg7e8.c | 32 +- .../non-policy/non-overloaded/vssseg8e16.c | 36 +- .../non-policy/non-overloaded/vssseg8e32.c | 24 +- .../non-policy/non-overloaded/vssseg8e64.c | 12 +- .../non-policy/non-overloaded/vssseg8e8.c | 32 +- .../non-policy/non-overloaded/vssub.c | 176 ++-- .../non-policy/non-overloaded/vssubu.c | 176 ++-- .../non-policy/non-overloaded/vsub.c | 352 +++---- .../non-policy/non-overloaded/vsuxei16.c | 228 ++--- .../non-policy/non-overloaded/vsuxei32.c | 208 ++-- .../non-policy/non-overloaded/vsuxei64.c | 176 ++-- .../non-policy/non-overloaded/vsuxei8.c | 236 ++--- .../non-policy/non-overloaded/vsuxseg2ei16.c | 192 ++-- .../non-policy/non-overloaded/vsuxseg2ei32.c | 184 ++-- .../non-policy/non-overloaded/vsuxseg2ei64.c | 164 +-- .../non-policy/non-overloaded/vsuxseg2ei8.c | 192 ++-- .../non-policy/non-overloaded/vsuxseg3ei16.c | 148 +-- .../non-policy/non-overloaded/vsuxseg3ei32.c | 148 +-- 
.../non-policy/non-overloaded/vsuxseg3ei64.c | 140 +-- .../non-policy/non-overloaded/vsuxseg3ei8.c | 148 +-- .../non-policy/non-overloaded/vsuxseg4ei16.c | 148 +-- .../non-policy/non-overloaded/vsuxseg4ei32.c | 148 +-- .../non-policy/non-overloaded/vsuxseg4ei64.c | 140 +-- .../non-policy/non-overloaded/vsuxseg4ei8.c | 148 +-- .../non-policy/non-overloaded/vsuxseg5ei16.c | 104 +- .../non-policy/non-overloaded/vsuxseg5ei32.c | 104 +- .../non-policy/non-overloaded/vsuxseg5ei64.c | 104 +- .../non-policy/non-overloaded/vsuxseg5ei8.c | 104 +- .../non-policy/non-overloaded/vsuxseg6ei16.c | 104 +- .../non-policy/non-overloaded/vsuxseg6ei32.c | 104 +- .../non-policy/non-overloaded/vsuxseg6ei64.c | 104 +- .../non-policy/non-overloaded/vsuxseg6ei8.c | 104 +- .../non-policy/non-overloaded/vsuxseg7ei16.c | 104 +- .../non-policy/non-overloaded/vsuxseg7ei32.c | 104 +- .../non-policy/non-overloaded/vsuxseg7ei64.c | 104 +- .../non-policy/non-overloaded/vsuxseg7ei8.c | 104 +- .../non-policy/non-overloaded/vsuxseg8ei16.c | 104 +- .../non-policy/non-overloaded/vsuxseg8ei32.c | 104 +- .../non-policy/non-overloaded/vsuxseg8ei64.c | 104 +- .../non-policy/non-overloaded/vsuxseg8ei8.c | 104 +- .../non-policy/non-overloaded/vundefined.c | 118 +-- .../non-policy/non-overloaded/vwadd.c | 240 ++--- .../non-policy/non-overloaded/vwaddu.c | 240 ++--- .../non-policy/non-overloaded/vwcvt.c | 60 +- .../non-policy/non-overloaded/vwcvtu.c | 60 +- .../non-policy/non-overloaded/vwmacc.c | 120 +-- .../non-policy/non-overloaded/vwmaccsu.c | 120 +-- .../non-policy/non-overloaded/vwmaccu.c | 120 +-- .../non-policy/non-overloaded/vwmaccus.c | 60 +- .../non-policy/non-overloaded/vwmul.c | 120 +-- .../non-policy/non-overloaded/vwmulsu.c | 120 +-- .../non-policy/non-overloaded/vwmulu.c | 120 +-- .../non-policy/non-overloaded/vwredsum.c | 72 +- .../non-policy/non-overloaded/vwredsumu.c | 72 +- .../non-policy/non-overloaded/vwsub.c | 240 ++--- .../non-policy/non-overloaded/vwsubu.c | 240 ++--- 
.../non-policy/non-overloaded/vxor.c | 352 +++---- .../non-policy/non-overloaded/vzext.c | 112 +-- .../policy/non-overloaded/vaadd.c | 352 +++---- .../policy/non-overloaded/vaaddu.c | 352 +++---- .../policy/non-overloaded/vadc.c | 176 ++-- .../policy/non-overloaded/vadd.c | 704 ++++++------- .../policy/non-overloaded/vand.c | 704 ++++++------- .../policy/non-overloaded/vasub.c | 352 +++---- .../policy/non-overloaded/vasubu.c | 352 +++---- .../policy/non-overloaded/vcompress.c | 118 +-- .../policy/non-overloaded/vdiv.c | 352 +++---- .../policy/non-overloaded/vdivu.c | 352 +++---- .../policy/non-overloaded/vfabs.c | 120 +-- .../policy/non-overloaded/vfadd.c | 240 ++--- .../policy/non-overloaded/vfclass.c | 120 +-- .../policy/non-overloaded/vfcvt.c | 720 ++++++------- .../policy/non-overloaded/vfdiv.c | 240 ++--- .../policy/non-overloaded/vfmacc.c | 240 ++--- .../policy/non-overloaded/vfmadd.c | 240 ++--- .../policy/non-overloaded/vfmax.c | 240 ++--- .../policy/non-overloaded/vfmerge.c | 30 +- .../policy/non-overloaded/vfmin.c | 240 ++--- .../policy/non-overloaded/vfmsac.c | 240 ++--- .../policy/non-overloaded/vfmsub.c | 240 ++--- .../policy/non-overloaded/vfmul.c | 240 ++--- .../policy/non-overloaded/vfmv.c | 60 +- .../policy/non-overloaded/vfncvt.c | 768 +++++++------- .../policy/non-overloaded/vfneg.c | 120 +-- .../policy/non-overloaded/vfnmacc.c | 240 ++--- .../policy/non-overloaded/vfnmadd.c | 240 ++--- .../policy/non-overloaded/vfnmsac.c | 240 ++--- .../policy/non-overloaded/vfnmsub.c | 240 ++--- .../policy/non-overloaded/vfrdiv.c | 120 +-- .../policy/non-overloaded/vfrec7.c | 120 +-- .../policy/non-overloaded/vfredmax.c | 60 +- .../policy/non-overloaded/vfredmin.c | 60 +- .../policy/non-overloaded/vfredosum.c | 60 +- .../policy/non-overloaded/vfredusum.c | 60 +- .../policy/non-overloaded/vfrsqrt7.c | 120 +-- .../policy/non-overloaded/vfrsub.c | 120 +-- .../policy/non-overloaded/vfsgnj.c | 240 ++--- .../policy/non-overloaded/vfsgnjn.c | 240 ++--- 
.../policy/non-overloaded/vfsgnjx.c | 240 ++--- .../policy/non-overloaded/vfslide1down.c | 120 +-- .../policy/non-overloaded/vfslide1up.c | 120 +-- .../policy/non-overloaded/vfsqrt.c | 120 +-- .../policy/non-overloaded/vfsub.c | 240 ++--- .../policy/non-overloaded/vfwadd.c | 288 +++--- .../policy/non-overloaded/vfwcvt.c | 600 +++++------ .../policy/non-overloaded/vfwmacc.c | 144 +-- .../policy/non-overloaded/vfwmsac.c | 144 +-- .../policy/non-overloaded/vfwmul.c | 144 +-- .../policy/non-overloaded/vfwnmacc.c | 144 +-- .../policy/non-overloaded/vfwnmsac.c | 144 +-- .../policy/non-overloaded/vfwredosum.c | 44 +- .../policy/non-overloaded/vfwredusum.c | 44 +- .../policy/non-overloaded/vfwsub.c | 288 +++--- .../policy/non-overloaded/vid.c | 176 ++-- .../policy/non-overloaded/viota.c | 176 ++-- .../policy/non-overloaded/vle16.c | 144 +-- .../policy/non-overloaded/vle16ff.c | 144 +-- .../policy/non-overloaded/vle32.c | 120 +-- .../policy/non-overloaded/vle32ff.c | 120 +-- .../policy/non-overloaded/vle64.c | 96 +- .../policy/non-overloaded/vle64ff.c | 96 +- .../policy/non-overloaded/vle8.c | 112 +-- .../policy/non-overloaded/vle8ff.c | 112 +-- .../policy/non-overloaded/vloxei16.c | 456 ++++----- .../policy/non-overloaded/vloxei32.c | 416 ++++---- .../policy/non-overloaded/vloxei64.c | 352 +++---- .../policy/non-overloaded/vloxei8.c | 472 ++++----- .../policy/non-overloaded/vloxseg2ei16.c | 384 +++---- .../policy/non-overloaded/vloxseg2ei32.c | 368 +++---- .../policy/non-overloaded/vloxseg2ei64.c | 328 +++--- .../policy/non-overloaded/vloxseg2ei8.c | 384 +++---- .../policy/non-overloaded/vloxseg3ei16.c | 296 +++--- .../policy/non-overloaded/vloxseg3ei32.c | 296 +++--- .../policy/non-overloaded/vloxseg3ei64.c | 280 +++--- .../policy/non-overloaded/vloxseg3ei8.c | 296 +++--- .../policy/non-overloaded/vloxseg4ei16.c | 296 +++--- .../policy/non-overloaded/vloxseg4ei32.c | 296 +++--- .../policy/non-overloaded/vloxseg4ei64.c | 280 +++--- .../policy/non-overloaded/vloxseg4ei8.c | 
296 +++--- .../policy/non-overloaded/vloxseg5ei16.c | 208 ++-- .../policy/non-overloaded/vloxseg5ei32.c | 208 ++-- .../policy/non-overloaded/vloxseg5ei64.c | 208 ++-- .../policy/non-overloaded/vloxseg5ei8.c | 208 ++-- .../policy/non-overloaded/vloxseg6ei16.c | 208 ++-- .../policy/non-overloaded/vloxseg6ei32.c | 208 ++-- .../policy/non-overloaded/vloxseg6ei64.c | 208 ++-- .../policy/non-overloaded/vloxseg6ei8.c | 208 ++-- .../policy/non-overloaded/vloxseg7ei16.c | 208 ++-- .../policy/non-overloaded/vloxseg7ei32.c | 208 ++-- .../policy/non-overloaded/vloxseg7ei64.c | 208 ++-- .../policy/non-overloaded/vloxseg7ei8.c | 208 ++-- .../policy/non-overloaded/vloxseg8ei16.c | 208 ++-- .../policy/non-overloaded/vloxseg8ei32.c | 208 ++-- .../policy/non-overloaded/vloxseg8ei64.c | 208 ++-- .../policy/non-overloaded/vloxseg8ei8.c | 208 ++-- .../policy/non-overloaded/vlse16.c | 144 +-- .../policy/non-overloaded/vlse32.c | 120 +-- .../policy/non-overloaded/vlse64.c | 96 +- .../policy/non-overloaded/vlse8.c | 112 +-- .../policy/non-overloaded/vlseg2e16.c | 120 +-- .../policy/non-overloaded/vlseg2e16ff.c | 120 +-- .../policy/non-overloaded/vlseg2e32.c | 96 +- .../policy/non-overloaded/vlseg2e32ff.c | 96 +- .../policy/non-overloaded/vlseg2e64.c | 72 +- .../policy/non-overloaded/vlseg2e64ff.c | 72 +- .../policy/non-overloaded/vlseg2e8.c | 96 +- .../policy/non-overloaded/vlseg2e8ff.c | 96 +- .../policy/non-overloaded/vlseg3e16.c | 96 +- .../policy/non-overloaded/vlseg3e16ff.c | 96 +- .../policy/non-overloaded/vlseg3e32.c | 72 +- .../policy/non-overloaded/vlseg3e32ff.c | 72 +- .../policy/non-overloaded/vlseg3e64.c | 48 +- .../policy/non-overloaded/vlseg3e64ff.c | 48 +- .../policy/non-overloaded/vlseg3e8.c | 80 +- .../policy/non-overloaded/vlseg3e8ff.c | 80 +- .../policy/non-overloaded/vlseg4e16.c | 96 +- .../policy/non-overloaded/vlseg4e16ff.c | 96 +- .../policy/non-overloaded/vlseg4e32.c | 72 +- .../policy/non-overloaded/vlseg4e32ff.c | 72 +- .../policy/non-overloaded/vlseg4e64.c | 48 
+- .../policy/non-overloaded/vlseg4e64ff.c | 48 +- .../policy/non-overloaded/vlseg4e8.c | 80 +- .../policy/non-overloaded/vlseg4e8ff.c | 80 +- .../policy/non-overloaded/vlseg5e16.c | 72 +- .../policy/non-overloaded/vlseg5e16ff.c | 72 +- .../policy/non-overloaded/vlseg5e32.c | 48 +- .../policy/non-overloaded/vlseg5e32ff.c | 48 +- .../policy/non-overloaded/vlseg5e64.c | 24 +- .../policy/non-overloaded/vlseg5e64ff.c | 24 +- .../policy/non-overloaded/vlseg5e8.c | 64 +- .../policy/non-overloaded/vlseg5e8ff.c | 64 +- .../policy/non-overloaded/vlseg6e16.c | 72 +- .../policy/non-overloaded/vlseg6e16ff.c | 72 +- .../policy/non-overloaded/vlseg6e32.c | 48 +- .../policy/non-overloaded/vlseg6e32ff.c | 48 +- .../policy/non-overloaded/vlseg6e64.c | 24 +- .../policy/non-overloaded/vlseg6e64ff.c | 24 +- .../policy/non-overloaded/vlseg6e8.c | 64 +- .../policy/non-overloaded/vlseg6e8ff.c | 64 +- .../policy/non-overloaded/vlseg7e16.c | 72 +- .../policy/non-overloaded/vlseg7e16ff.c | 72 +- .../policy/non-overloaded/vlseg7e32.c | 48 +- .../policy/non-overloaded/vlseg7e32ff.c | 48 +- .../policy/non-overloaded/vlseg7e64.c | 24 +- .../policy/non-overloaded/vlseg7e64ff.c | 24 +- .../policy/non-overloaded/vlseg7e8.c | 64 +- .../policy/non-overloaded/vlseg7e8ff.c | 64 +- .../policy/non-overloaded/vlseg8e16.c | 72 +- .../policy/non-overloaded/vlseg8e16ff.c | 72 +- .../policy/non-overloaded/vlseg8e32.c | 48 +- .../policy/non-overloaded/vlseg8e32ff.c | 48 +- .../policy/non-overloaded/vlseg8e64.c | 24 +- .../policy/non-overloaded/vlseg8e64ff.c | 24 +- .../policy/non-overloaded/vlseg8e8.c | 64 +- .../policy/non-overloaded/vlseg8e8ff.c | 64 +- .../policy/non-overloaded/vlsseg2e16.c | 120 +-- .../policy/non-overloaded/vlsseg2e32.c | 96 +- .../policy/non-overloaded/vlsseg2e64.c | 72 +- .../policy/non-overloaded/vlsseg2e8.c | 96 +- .../policy/non-overloaded/vlsseg3e16.c | 96 +- .../policy/non-overloaded/vlsseg3e32.c | 72 +- .../policy/non-overloaded/vlsseg3e64.c | 48 +- 
.../policy/non-overloaded/vlsseg3e8.c | 80 +- .../policy/non-overloaded/vlsseg4e16.c | 96 +- .../policy/non-overloaded/vlsseg4e32.c | 72 +- .../policy/non-overloaded/vlsseg4e64.c | 48 +- .../policy/non-overloaded/vlsseg4e8.c | 80 +- .../policy/non-overloaded/vlsseg5e16.c | 72 +- .../policy/non-overloaded/vlsseg5e32.c | 48 +- .../policy/non-overloaded/vlsseg5e64.c | 24 +- .../policy/non-overloaded/vlsseg5e8.c | 64 +- .../policy/non-overloaded/vlsseg6e16.c | 72 +- .../policy/non-overloaded/vlsseg6e32.c | 48 +- .../policy/non-overloaded/vlsseg6e64.c | 24 +- .../policy/non-overloaded/vlsseg6e8.c | 64 +- .../policy/non-overloaded/vlsseg7e16.c | 72 +- .../policy/non-overloaded/vlsseg7e32.c | 48 +- .../policy/non-overloaded/vlsseg7e64.c | 24 +- .../policy/non-overloaded/vlsseg7e8.c | 64 +- .../policy/non-overloaded/vlsseg8e16.c | 72 +- .../policy/non-overloaded/vlsseg8e32.c | 48 +- .../policy/non-overloaded/vlsseg8e64.c | 24 +- .../policy/non-overloaded/vlsseg8e8.c | 64 +- .../policy/non-overloaded/vluxei16.c | 456 ++++----- .../policy/non-overloaded/vluxei32.c | 416 ++++---- .../policy/non-overloaded/vluxei64.c | 352 +++---- .../policy/non-overloaded/vluxei8.c | 472 ++++----- .../policy/non-overloaded/vluxseg2ei16.c | 384 +++---- .../policy/non-overloaded/vluxseg2ei32.c | 368 +++---- .../policy/non-overloaded/vluxseg2ei64.c | 328 +++--- .../policy/non-overloaded/vluxseg2ei8.c | 384 +++---- .../policy/non-overloaded/vluxseg3ei16.c | 296 +++--- .../policy/non-overloaded/vluxseg3ei32.c | 296 +++--- .../policy/non-overloaded/vluxseg3ei64.c | 280 +++--- .../policy/non-overloaded/vluxseg3ei8.c | 296 +++--- .../policy/non-overloaded/vluxseg4ei16.c | 296 +++--- .../policy/non-overloaded/vluxseg4ei32.c | 296 +++--- .../policy/non-overloaded/vluxseg4ei64.c | 280 +++--- .../policy/non-overloaded/vluxseg4ei8.c | 296 +++--- .../policy/non-overloaded/vluxseg5ei16.c | 208 ++-- .../policy/non-overloaded/vluxseg5ei32.c | 208 ++-- .../policy/non-overloaded/vluxseg5ei64.c | 208 ++-- 
.../policy/non-overloaded/vluxseg5ei8.c | 208 ++-- .../policy/non-overloaded/vluxseg6ei16.c | 208 ++-- .../policy/non-overloaded/vluxseg6ei32.c | 208 ++-- .../policy/non-overloaded/vluxseg6ei64.c | 208 ++-- .../policy/non-overloaded/vluxseg6ei8.c | 208 ++-- .../policy/non-overloaded/vluxseg7ei16.c | 208 ++-- .../policy/non-overloaded/vluxseg7ei32.c | 208 ++-- .../policy/non-overloaded/vluxseg7ei64.c | 208 ++-- .../policy/non-overloaded/vluxseg7ei8.c | 208 ++-- .../policy/non-overloaded/vluxseg8ei16.c | 208 ++-- .../policy/non-overloaded/vluxseg8ei32.c | 208 ++-- .../policy/non-overloaded/vluxseg8ei64.c | 208 ++-- .../policy/non-overloaded/vluxseg8ei8.c | 208 ++-- .../policy/non-overloaded/vmacc.c | 704 ++++++------- .../policy/non-overloaded/vmadd.c | 704 ++++++------- .../policy/non-overloaded/vmax.c | 352 +++---- .../policy/non-overloaded/vmaxu.c | 352 +++---- .../policy/non-overloaded/vmerge.c | 206 ++-- .../policy/non-overloaded/vmfeq.c | 60 +- .../policy/non-overloaded/vmfge.c | 60 +- .../policy/non-overloaded/vmfgt.c | 60 +- .../policy/non-overloaded/vmfle.c | 60 +- .../policy/non-overloaded/vmflt.c | 60 +- .../policy/non-overloaded/vmfne.c | 60 +- .../policy/non-overloaded/vmin.c | 352 +++---- .../policy/non-overloaded/vminu.c | 352 +++---- .../policy/non-overloaded/vmsbf.c | 14 +- .../policy/non-overloaded/vmseq.c | 176 ++-- .../policy/non-overloaded/vmsge.c | 88 +- .../policy/non-overloaded/vmsgeu.c | 88 +- .../policy/non-overloaded/vmsgt.c | 88 +- .../policy/non-overloaded/vmsgtu.c | 88 +- .../policy/non-overloaded/vmsif.c | 14 +- .../policy/non-overloaded/vmsle.c | 88 +- .../policy/non-overloaded/vmsleu.c | 88 +- .../policy/non-overloaded/vmslt.c | 88 +- .../policy/non-overloaded/vmsltu.c | 88 +- .../policy/non-overloaded/vmsne.c | 176 ++-- .../policy/non-overloaded/vmsof.c | 14 +- .../policy/non-overloaded/vmul.c | 704 ++++++------- .../policy/non-overloaded/vmulh.c | 352 +++---- .../policy/non-overloaded/vmulhsu.c | 352 +++---- 
.../policy/non-overloaded/vmulhu.c | 352 +++---- .../policy/non-overloaded/vmv.c | 294 +++--- .../policy/non-overloaded/vnclip.c | 240 ++--- .../policy/non-overloaded/vnclipu.c | 240 ++--- .../policy/non-overloaded/vncvt.c | 240 ++--- .../policy/non-overloaded/vneg.c | 176 ++-- .../policy/non-overloaded/vnmsac.c | 704 ++++++------- .../policy/non-overloaded/vnmsub.c | 704 ++++++------- .../policy/non-overloaded/vnot.c | 352 +++---- .../policy/non-overloaded/vnsra.c | 240 ++--- .../policy/non-overloaded/vnsrl.c | 240 ++--- .../policy/non-overloaded/vor.c | 704 ++++++------- .../policy/non-overloaded/vredand.c | 176 ++-- .../policy/non-overloaded/vredmax.c | 88 +- .../policy/non-overloaded/vredmaxu.c | 88 +- .../policy/non-overloaded/vredmin.c | 88 +- .../policy/non-overloaded/vredminu.c | 88 +- .../policy/non-overloaded/vredor.c | 176 ++-- .../policy/non-overloaded/vredsum.c | 176 ++-- .../policy/non-overloaded/vredxor.c | 176 ++-- .../policy/non-overloaded/vrem.c | 352 +++---- .../policy/non-overloaded/vremu.c | 352 +++---- .../policy/non-overloaded/vrgather.c | 944 +++++++++--------- .../policy/non-overloaded/vrgatherei16.c | 456 ++++----- .../policy/non-overloaded/vrsub.c | 352 +++---- .../policy/non-overloaded/vsadd.c | 352 +++---- .../policy/non-overloaded/vsaddu.c | 352 +++---- .../policy/non-overloaded/vsbc.c | 176 ++-- .../policy/non-overloaded/vsext.c | 224 ++--- .../policy/non-overloaded/vslide1down.c | 352 +++---- .../policy/non-overloaded/vslide1up.c | 352 +++---- .../policy/non-overloaded/vslidedown.c | 472 ++++----- .../policy/non-overloaded/vslideup.c | 472 ++++----- .../policy/non-overloaded/vsll.c | 704 ++++++------- .../policy/non-overloaded/vsmul.c | 352 +++---- .../policy/non-overloaded/vsra.c | 352 +++---- .../policy/non-overloaded/vsrl.c | 352 +++---- .../policy/non-overloaded/vssra.c | 352 +++---- .../policy/non-overloaded/vssrl.c | 352 +++---- .../policy/non-overloaded/vssub.c | 352 +++---- .../policy/non-overloaded/vssubu.c | 352 +++---- 
.../policy/non-overloaded/vsub.c | 704 ++++++------- .../policy/non-overloaded/vwadd.c | 480 ++++----- .../policy/non-overloaded/vwaddu.c | 480 ++++----- .../policy/non-overloaded/vwcvt.c | 120 +-- .../policy/non-overloaded/vwcvtu.c | 120 +-- .../policy/non-overloaded/vwmacc.c | 240 ++--- .../policy/non-overloaded/vwmaccsu.c | 240 ++--- .../policy/non-overloaded/vwmaccu.c | 240 ++--- .../policy/non-overloaded/vwmaccus.c | 120 +-- .../policy/non-overloaded/vwmul.c | 240 ++--- .../policy/non-overloaded/vwmulsu.c | 240 ++--- .../policy/non-overloaded/vwmulu.c | 240 ++--- .../policy/non-overloaded/vwredsum.c | 72 +- .../policy/non-overloaded/vwredsumu.c | 72 +- .../policy/non-overloaded/vwsub.c | 480 ++++----- .../policy/non-overloaded/vwsubu.c | 480 ++++----- .../policy/non-overloaded/vxor.c | 704 ++++++------- .../policy/non-overloaded/vzext.c | 224 ++--- .../vget-index-out-of-range.c | 136 +-- .../vget-vset-ice.cpp | 4 +- .../rvv-intrinsics-handcrafted/vmulh-eew64.c | 32 +- .../RISCV/rvv-intrinsics-handcrafted/vmulh.c | 144 +-- .../vmulhsu-eew64.c | 32 +- .../rvv-intrinsics-handcrafted/vmulhsu.c | 144 +-- .../rvv-intrinsics-handcrafted/vmulhu-eew64.c | 32 +- .../RISCV/rvv-intrinsics-handcrafted/vmulhu.c | 144 +-- .../vset-index-out-of-range.c | 136 +-- .../rvv-intrinsics-handcrafted/vsmul-eew64.c | 32 +- .../test/Sema/uninit-variables-riscv-vector.c | 12 +- 773 files changed, 56213 insertions(+), 56209 deletions(-) diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp index 2a3f30c79e68..3da35d9d2f6b 100644 --- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp +++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp @@ -1002,6 +1002,10 @@ void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy, OverloadedName += suffix; }; + // This follows the naming guideline under riscv-c-api-doc to add the + // `__riscv_` suffix for all RVV intrinsics. 
+ Name = "__riscv_" + Name; + if (IsMasked) { if (PolicyAttrs.isTUMUPolicy()) appendPolicySuffix("_tumu"); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaadd.c index 9387e4e54985..bbd3445d9e4a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaadd.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vaadd_vv_i8mf8(op1, op2, vl); + return __riscv_vaadd_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf8(op1, op2, vl); + return __riscv_vaadd_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vaadd_vv_i8mf4(op1, op2, vl); + return __riscv_vaadd_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf4(op1, op2, vl); + return __riscv_vaadd_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return 
vaadd_vv_i8mf2(op1, op2, vl); + return __riscv_vaadd_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf2(op1, op2, vl); + return __riscv_vaadd_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vaadd_vv_i8m1(op1, op2, vl); + return __riscv_vaadd_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m1(op1, op2, vl); + return __riscv_vaadd_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vaadd_vv_i8m2(op1, op2, vl); + return __riscv_vaadd_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m2(op1, op2, vl); + return __riscv_vaadd_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vaadd_vv_i8m4(op1, op2, vl); + return 
__riscv_vaadd_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m4(op1, op2, vl); + return __riscv_vaadd_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vaadd_vv_i8m8(op1, op2, vl); + return __riscv_vaadd_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m8(op1, op2, vl); + return __riscv_vaadd_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vaadd_vv_i16mf4(op1, op2, vl); + return __riscv_vaadd_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf4(op1, op2, vl); + return __riscv_vaadd_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vaadd_vv_i16mf2(op1, op2, vl); + 
return __riscv_vaadd_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf2(op1, op2, vl); + return __riscv_vaadd_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vaadd_vv_i16m1(op1, op2, vl); + return __riscv_vaadd_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m1(op1, op2, vl); + return __riscv_vaadd_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vaadd_vv_i16m2(op1, op2, vl); + return __riscv_vaadd_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m2(op1, op2, vl); + return __riscv_vaadd_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return 
vaadd_vv_i16m4(op1, op2, vl); + return __riscv_vaadd_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m4(op1, op2, vl); + return __riscv_vaadd_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vaadd_vv_i16m8(op1, op2, vl); + return __riscv_vaadd_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m8(op1, op2, vl); + return __riscv_vaadd_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vaadd_vv_i32mf2(op1, op2, vl); + return __riscv_vaadd_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32mf2(op1, op2, vl); + return __riscv_vaadd_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, 
vint32m1_t op2, size_t vl) { - return vaadd_vv_i32m1(op1, op2, vl); + return __riscv_vaadd_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m1(op1, op2, vl); + return __riscv_vaadd_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vaadd_vv_i32m2(op1, op2, vl); + return __riscv_vaadd_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m2(op1, op2, vl); + return __riscv_vaadd_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vaadd_vv_i32m4(op1, op2, vl); + return __riscv_vaadd_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m4(op1, op2, vl); + return __riscv_vaadd_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vaadd_vv_i32m8(op1, op2, vl); + return __riscv_vaadd_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m8(op1, op2, vl); + return __riscv_vaadd_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vaadd_vv_i64m1(op1, op2, vl); + return __riscv_vaadd_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m1(op1, op2, vl); + return __riscv_vaadd_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vaadd_vv_i64m2(op1, op2, vl); + return __riscv_vaadd_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m2(op1, op2, vl); + return __riscv_vaadd_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vaadd_vv_i64m4(op1, op2, vl); + return __riscv_vaadd_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m4(op1, op2, vl); + return __riscv_vaadd_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vaadd_vv_i64m8(op1, op2, vl); + return __riscv_vaadd_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m8(op1, op2, vl); + return __riscv_vaadd_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m( @@ -408,7 +408,7 @@ vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vaadd_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m( @@ -426,7 +426,7 @@ 
vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vaadd_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m( @@ -435,7 +435,7 @@ vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m( @@ -444,7 +444,7 @@ vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vaadd_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m( @@ -453,7 +453,7 @@ vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m( @@ -462,7 +462,7 @@ vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vaadd_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m( @@ -480,7 +480,7 @@ vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vaadd_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m( @@ -489,7 +489,7 @@ vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m( @@ -498,7 +498,7 @@ vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vaadd_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m( @@ -507,7 +507,7 @@ vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m( @@ -516,7 +516,7 @@ vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vaadd_vv_i8m8_m(mask, op1, op2, vl); + return 
__riscv_vaadd_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m( @@ -525,7 +525,7 @@ vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m( @@ -534,7 +534,7 @@ vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vaadd_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m( @@ -543,7 +543,7 @@ vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m( @@ -552,7 +552,7 @@ vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vaadd_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m( @@ -561,7 +561,7 @@ vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m( 
@@ -570,7 +570,7 @@ vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vaadd_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m( @@ -579,7 +579,7 @@ vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m( @@ -588,7 +588,7 @@ vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vaadd_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m( @@ -597,7 +597,7 @@ vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m( @@ -606,7 +606,7 @@ vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vaadd_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m( @@ -615,7 +615,7 @@ vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m( @@ -624,7 +624,7 @@ vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vaadd_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m( @@ -633,7 +633,7 @@ vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m( @@ -642,7 +642,7 @@ vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vaadd_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m( @@ -651,7 +651,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m( @@ -660,7 +660,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, 
vint32m1_t op2, size_t vl) { - return vaadd_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m( @@ -669,7 +669,7 @@ vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m( @@ -678,7 +678,7 @@ vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vaadd_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m( @@ -687,7 +687,7 @@ vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m( @@ -696,7 +696,7 @@ vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vaadd_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m( @@ -705,7 +705,7 @@ vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m4_m(mask, op1, op2, vl); + return 
__riscv_vaadd_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m( @@ -714,7 +714,7 @@ vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vaadd_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m( @@ -723,7 +723,7 @@ vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m( @@ -732,7 +732,7 @@ vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vaadd_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m( @@ -741,7 +741,7 @@ vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m( @@ -750,7 +750,7 @@ vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vaadd_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m( @@ -759,7 
+759,7 @@ vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m( @@ -768,7 +768,7 @@ vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vaadd_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m( @@ -777,7 +777,7 @@ vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m( @@ -786,7 +786,7 @@ vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vaadd_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vaadd_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m( @@ -795,6 +795,6 @@ vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vaadd_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaaddu.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaaddu.c index 618bc7bde5ab..65ed52ca4ee2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaaddu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vaaddu_vv_u8mf8(op1, op2, vl); + return __riscv_vaaddu_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf8(op1, op2, vl); + return __riscv_vaaddu_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vaaddu_vv_u8mf4(op1, op2, vl); + return __riscv_vaaddu_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4( @@ -39,7 +39,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf4(op1, op2, vl); + return __riscv_vaaddu_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2( @@ -48,7 +48,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vaaddu_vv_u8mf2(op1, op2, vl); + return __riscv_vaaddu_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t 
test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf2(op1, op2, vl); + return __riscv_vaaddu_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vaaddu_vv_u8m1(op1, op2, vl); + return __riscv_vaaddu_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m1(op1, op2, vl); + return __riscv_vaaddu_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2( @@ -84,7 +84,7 @@ vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vaaddu_vv_u8m2(op1, op2, vl); + return __riscv_vaaddu_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m2(op1, op2, vl); + return __riscv_vaaddu_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vaaddu_vv_u8m4(op1, op2, vl); + return __riscv_vaaddu_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4( @@ -111,7 
+111,7 @@ vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m4(op1, op2, vl); + return __riscv_vaaddu_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8( @@ -120,7 +120,7 @@ vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vaaddu_vv_u8m8(op1, op2, vl); + return __riscv_vaaddu_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8( @@ -129,7 +129,7 @@ vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m8(op1, op2, vl); + return __riscv_vaaddu_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4( @@ -138,7 +138,7 @@ vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vaaddu_vv_u16mf4(op1, op2, vl); + return __riscv_vaaddu_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4( @@ -147,7 +147,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf4(op1, op2, vl); + return __riscv_vaaddu_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2( @@ -156,7 +156,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vaaddu_vv_u16mf2(op1, op2, vl); + return 
__riscv_vaaddu_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2( @@ -165,7 +165,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf2(op1, op2, vl); + return __riscv_vaaddu_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1( @@ -174,7 +174,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vaaddu_vv_u16m1(op1, op2, vl); + return __riscv_vaaddu_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1( @@ -183,7 +183,7 @@ vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m1(op1, op2, vl); + return __riscv_vaaddu_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2( @@ -192,7 +192,7 @@ vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vaaddu_vv_u16m2(op1, op2, vl); + return __riscv_vaaddu_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2( @@ -201,7 +201,7 @@ vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m2(op1, op2, vl); + return __riscv_vaaddu_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4( @@ -210,7 +210,7 @@ vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vaaddu_vv_u16m4(op1, op2, vl); + return __riscv_vaaddu_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4( @@ -219,7 +219,7 @@ vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m4(op1, op2, vl); + return __riscv_vaaddu_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8( @@ -228,7 +228,7 @@ vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vaaddu_vv_u16m8(op1, op2, vl); + return __riscv_vaaddu_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8( @@ -237,7 +237,7 @@ vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m8(op1, op2, vl); + return __riscv_vaaddu_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2( @@ -246,7 +246,7 @@ vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vaaddu_vv_u32mf2(op1, op2, vl); + return __riscv_vaaddu_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32mf2(op1, op2, vl); + return __riscv_vaaddu_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t 
test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vaaddu_vv_u32m1(op1, op2, vl); + return __riscv_vaaddu_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m1(op1, op2, vl); + return __riscv_vaaddu_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2( @@ -282,7 +282,7 @@ vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vaaddu_vv_u32m2(op1, op2, vl); + return __riscv_vaaddu_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2( @@ -291,7 +291,7 @@ vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m2(op1, op2, vl); + return __riscv_vaaddu_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4( @@ -300,7 +300,7 @@ vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vaaddu_vv_u32m4(op1, op2, vl); + return __riscv_vaaddu_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4( @@ -309,7 +309,7 @@ vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m4(op1, op2, vl); + return __riscv_vaaddu_vx_u32m4(op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8( @@ -318,7 +318,7 @@ vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vaaddu_vv_u32m8(op1, op2, vl); + return __riscv_vaaddu_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8( @@ -327,7 +327,7 @@ vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m8(op1, op2, vl); + return __riscv_vaaddu_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1( @@ -336,7 +336,7 @@ vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vaaddu_vv_u64m1(op1, op2, vl); + return __riscv_vaaddu_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1( @@ -345,7 +345,7 @@ vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m1(op1, op2, vl); + return __riscv_vaaddu_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2( @@ -354,7 +354,7 @@ vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vaaddu_vv_u64m2(op1, op2, vl); + return __riscv_vaaddu_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2( @@ -363,7 +363,7 @@ vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t 
vl) { - return vaaddu_vx_u64m2(op1, op2, vl); + return __riscv_vaaddu_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4( @@ -372,7 +372,7 @@ vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vaaddu_vv_u64m4(op1, op2, vl); + return __riscv_vaaddu_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4( @@ -381,7 +381,7 @@ vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m4(op1, op2, vl); + return __riscv_vaaddu_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8( @@ -390,7 +390,7 @@ vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vaaddu_vv_u64m8(op1, op2, vl); + return __riscv_vaaddu_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m8(op1, op2, vl); + return __riscv_vaaddu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vaaddu_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m( @@ -417,7 +417,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t 
op1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m( @@ -426,7 +426,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vaaddu_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m( @@ -435,7 +435,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m( @@ -444,7 +444,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vaaddu_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m( @@ -453,7 +453,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m( @@ -462,7 +462,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vaaddu_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m( @@ -471,7 +471,7 @@ vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m( @@ -480,7 +480,7 @@ vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vaaddu_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m( @@ -489,7 +489,7 @@ vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m( @@ -498,7 +498,7 @@ vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vaaddu_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m( @@ -507,7 +507,7 @@ vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m4_m(mask, 
op1, op2, vl); + return __riscv_vaaddu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m( @@ -516,7 +516,7 @@ vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vaaddu_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m( @@ -525,7 +525,7 @@ vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m( @@ -534,7 +534,7 @@ vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vaaddu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m( @@ -543,7 +543,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m( @@ -552,7 +552,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vaaddu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u16mf2_m(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m( @@ -561,7 +561,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m( @@ -570,7 +570,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vaaddu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m( @@ -579,7 +579,7 @@ vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m( @@ -588,7 +588,7 @@ vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vaaddu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m( @@ -597,7 +597,7 @@ vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m( @@ -606,7 +606,7 
@@ vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vaaddu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m( @@ -615,7 +615,7 @@ vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m( @@ -624,7 +624,7 @@ vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vaaddu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m( @@ -633,7 +633,7 @@ vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m( @@ -642,7 +642,7 @@ vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vaaddu_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m( @@ -651,7 +651,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, 
vuint32mf2_t op1, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m( @@ -660,7 +660,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vaaddu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m( @@ -669,7 +669,7 @@ vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m( @@ -678,7 +678,7 @@ vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vaaddu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m( @@ -687,7 +687,7 @@ vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m( @@ -696,7 +696,7 @@ vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vaaddu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m( @@ -705,7 +705,7 @@ vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m( @@ -714,7 +714,7 @@ vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vaaddu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m( @@ -723,7 +723,7 @@ vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m( @@ -732,7 +732,7 @@ vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vaaddu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m( @@ -741,7 +741,7 @@ vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, 
uint64_t op2, size_t vl) { - return vaaddu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m( @@ -750,7 +750,7 @@ vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vaaddu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m( @@ -759,7 +759,7 @@ vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m( @@ -768,7 +768,7 @@ vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vaaddu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vaaddu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m( @@ -777,7 +777,7 @@ vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m( @@ -786,7 +786,7 @@ vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vaaddu_vv_u64m8_m(mask, op1, op2, 
vl); + return __riscv_vaaddu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vaaddu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadc.c index 9422f43b425f..66f5eb6e4f6f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadc.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_i8mf8(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8mf8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_i8mf8(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8mf8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_i8mf4(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8mf4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, 
vbool32_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_i8mf4(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8mf4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_i8mf2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8mf2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_i8mf2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8mf2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_i8m1(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_i8m1(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, 
size_t vl) { - return vadc_vvm_i8m2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl) { - return vadc_vxm_i8m2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl) { - return vadc_vvm_i8m4(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl) { - return vadc_vxm_i8m4(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl) { - return vadc_vvm_i8m8(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl) { - return vadc_vxm_i8m8(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: 
@test_vadc_vvm_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_i16mf4(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i16mf4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t car // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_i16mf4(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i16mf4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_i16mf2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i16mf2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t car // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_i16mf2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i16mf2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_i16m1(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i16m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i16m1( @@ -183,7 +183,7 @@ vint16m1_t 
test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_i16m1(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i16m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_i16m2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i16m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_i16m2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i16m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl) { - return vadc_vvm_i16m4(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i16m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl) { - return vadc_vxm_i16m4(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i16m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin, si // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl) { - return vadc_vvm_i16m8(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i16m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl) { - return vadc_vxm_i16m8(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i16m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_i32mf2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i32mf2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t car // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_i32mf2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i32mf2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_i32m1(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i32m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl) 
{ - return vadc_vxm_i32m1(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i32m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_i32m2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i32m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_i32m2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i32m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_i32m4(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i32m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_i32m4(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i32m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl) { - return vadc_vvm_i32m8(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i32m8(op1, op2, carryin, 
vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl) { - return vadc_vxm_i32m8(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i32m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_i64m1(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i64m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_i64m1(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i64m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_i64m2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i64m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_i64m2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i64m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i64m4( @@ -372,7 +372,7 @@ vint64m2_t 
test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_i64m4(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i64m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_i64m4(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i64m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_i64m8(op1, op2, carryin, vl); + return __riscv_vadc_vvm_i64m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_i64m8(op1, op2, carryin, vl); + return __riscv_vadc_vxm_i64m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8( @@ -408,7 +408,7 @@ vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_u8mf8(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8mf8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8( @@ -417,7 +417,7 @@ vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carr // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_u8mf8(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u8mf8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4( @@ -426,7 +426,7 @@ vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_u8mf4(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8mf4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4( @@ -435,7 +435,7 @@ vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_u8mf4(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u8mf4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2( @@ -444,7 +444,7 @@ vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_u8mf2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8mf2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2( @@ -453,7 +453,7 @@ vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_u8mf2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u8mf2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u8m1( @@ -462,7 +462,7 @@ vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, 
size_t vl) { - return vadc_vvm_u8m1(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8m1( @@ -471,7 +471,7 @@ vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_u8m1(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u8m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u8m2( @@ -480,7 +480,7 @@ vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl) { - return vadc_vvm_u8m2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8m2( @@ -489,7 +489,7 @@ vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl) { - return vadc_vxm_u8m2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u8m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u8m4( @@ -498,7 +498,7 @@ vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl) { - return vadc_vvm_u8m4(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8m4( @@ -507,7 +507,7 @@ vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl) { - return vadc_vxm_u8m4(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u8m4(op1, op2, carryin, vl); } // 
CHECK-RV64-LABEL: @test_vadc_vvm_u8m8( @@ -516,7 +516,7 @@ vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl) { - return vadc_vvm_u8m8(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8m8( @@ -525,7 +525,7 @@ vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl) { - return vadc_vxm_u8m8(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u8m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4( @@ -534,7 +534,7 @@ vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_u16mf4(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u16mf4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf4( @@ -543,7 +543,7 @@ vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_u16mf4(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u16mf4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2( @@ -552,7 +552,7 @@ vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_u16mf2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u16mf2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2( @@ -561,7 +561,7 @@ vuint16mf2_t 
test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_u16mf2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u16mf2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u16m1( @@ -570,7 +570,7 @@ vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_u16m1(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u16m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u16m1( @@ -579,7 +579,7 @@ vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_u16m1(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u16m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u16m2( @@ -588,7 +588,7 @@ vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_u16m2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u16m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u16m2( @@ -597,7 +597,7 @@ vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t carry // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_u16m2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u16m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u16m4( @@ -606,7 +606,7 @@ vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl) { - return vadc_vvm_u16m4(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u16m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u16m4( @@ -615,7 +615,7 @@ vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t carry // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl) { - return vadc_vxm_u16m4(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u16m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u16m8( @@ -624,7 +624,7 @@ vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl) { - return vadc_vvm_u16m8(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u16m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u16m8( @@ -633,7 +633,7 @@ vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t carry // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl) { - return vadc_vxm_u16m8(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u16m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2( @@ -642,7 +642,7 @@ vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_u32mf2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u32mf2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2( @@ -651,7 +651,7 @@ vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t 
op1, uint32_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_u32mf2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u32mf2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u32m1( @@ -660,7 +660,7 @@ vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_u32m1(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u32m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u32m1( @@ -669,7 +669,7 @@ vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_u32m1(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u32m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u32m2( @@ -678,7 +678,7 @@ vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_u32m2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u32m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u32m2( @@ -687,7 +687,7 @@ vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_u32m2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u32m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u32m4( @@ -696,7 +696,7 @@ vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_u32m4(op1, op2, 
carryin, vl); + return __riscv_vadc_vvm_u32m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u32m4( @@ -705,7 +705,7 @@ vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t carry // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_u32m4(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u32m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u32m8( @@ -714,7 +714,7 @@ vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl) { - return vadc_vvm_u32m8(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u32m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u32m8( @@ -723,7 +723,7 @@ vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t carry // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl) { - return vadc_vxm_u32m8(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u32m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u64m1( @@ -732,7 +732,7 @@ vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_u64m1(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u64m1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u64m1( @@ -741,7 +741,7 @@ vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_u64m1(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u64m1(op1, op2, carryin, vl); } // 
CHECK-RV64-LABEL: @test_vadc_vvm_u64m2( @@ -750,7 +750,7 @@ vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_u64m2(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u64m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u64m2( @@ -759,7 +759,7 @@ vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_u64m2(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u64m2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u64m4( @@ -768,7 +768,7 @@ vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_u64m4(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u64m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u64m4( @@ -777,7 +777,7 @@ vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_u64m4(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u64m4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u64m8( @@ -786,7 +786,7 @@ vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_u64m8(op1, op2, carryin, vl); + return __riscv_vadc_vvm_u64m8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u64m8( @@ -795,6 +795,6 @@ vuint64m8_t 
test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t carry // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_u64m8(op1, op2, carryin, vl); + return __riscv_vadc_vxm_u64m8(op1, op2, carryin, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadd.c index c0640594c485..0085b21aee12 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vadd.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vadd_vv_i8mf8(op1, op2, vl); + return __riscv_vadd_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf8(op1, op2, vl); + return __riscv_vadd_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vadd_vv_i8mf4(op1, op2, vl); + return __riscv_vadd_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf4(op1, op2, vl); + return __riscv_vadd_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t 
test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vadd_vv_i8mf2(op1, op2, vl); + return __riscv_vadd_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf2(op1, op2, vl); + return __riscv_vadd_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vadd_vv_i8m1(op1, op2, vl); + return __riscv_vadd_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m1(op1, op2, vl); + return __riscv_vadd_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vadd_vv_i8m2(op1, op2, vl); + return __riscv_vadd_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m2(op1, op2, vl); + return __riscv_vadd_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vadd_vv_i8m4(op1, op2, vl); + return __riscv_vadd_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m4(op1, op2, vl); + return __riscv_vadd_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vadd_vv_i8m8(op1, op2, vl); + return __riscv_vadd_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m8(op1, op2, vl); + return __riscv_vadd_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vadd_vv_i16mf4(op1, op2, vl); + return __riscv_vadd_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16mf4(op1, op2, vl); + return __riscv_vadd_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vadd_vv_i16mf2(op1, op2, vl); + return __riscv_vadd_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16mf2(op1, op2, vl); + return __riscv_vadd_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vadd_vv_i16m1(op1, op2, vl); + return __riscv_vadd_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m1(op1, op2, vl); + return __riscv_vadd_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vadd_vv_i16m2(op1, op2, vl); + return __riscv_vadd_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m2(op1, op2, vl); + return __riscv_vadd_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vadd_vv_i16m4(op1, op2, vl); + return __riscv_vadd_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m4(op1, op2, vl); + return __riscv_vadd_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vadd_vv_i16m8(op1, op2, vl); + return __riscv_vadd_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m8(op1, op2, vl); + return __riscv_vadd_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vadd_vv_i32mf2(op1, op2, vl); + return __riscv_vadd_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32mf2(op1, op2, vl); + return __riscv_vadd_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vadd_vv_i32m1(op1, op2, vl); + return __riscv_vadd_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m1(op1, op2, vl); + return __riscv_vadd_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vadd_vv_i32m2(op1, op2, vl); + return __riscv_vadd_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m2(op1, op2, vl); + return __riscv_vadd_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vadd_vv_i32m4(op1, op2, vl); + return __riscv_vadd_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m4(op1, op2, vl); + return __riscv_vadd_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vadd_vv_i32m8(op1, op2, vl); + return __riscv_vadd_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m8(op1, op2, vl); + return __riscv_vadd_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vadd_vv_i64m1(op1, op2, vl); + return __riscv_vadd_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m1(op1, op2, vl); + return __riscv_vadd_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vadd_vv_i64m2(op1, op2, vl); + return __riscv_vadd_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m2(op1, op2, vl); + return __riscv_vadd_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vadd_vv_i64m4(op1, op2, vl); + return __riscv_vadd_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m4(op1, op2, vl); + return __riscv_vadd_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vadd_vv_i64m8(op1, op2, vl); + return __riscv_vadd_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m8(op1, op2, vl); + return __riscv_vadd_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8( @@ -408,7 +408,7 @@ vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vadd_vv_u8mf8(op1, op2, vl); + return __riscv_vadd_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8( @@ -417,7 +417,7 @@ vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf8(op1, op2, vl); + return __riscv_vadd_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4( @@ -426,7 +426,7 @@ vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vadd_vv_u8mf4(op1, op2, vl); + return __riscv_vadd_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4( @@ -435,7 +435,7 @@ vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf4(op1, op2, vl); + return __riscv_vadd_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2( @@ -444,7 +444,7 @@ vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vadd_vv_u8mf2(op1, op2, vl); + return __riscv_vadd_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2( @@ -453,7 +453,7 @@ vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf2(op1, op2, vl); + return __riscv_vadd_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m1( @@ -462,7 +462,7 @@ vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vadd_vv_u8m1(op1, op2, vl); + return __riscv_vadd_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m1( @@ -471,7 +471,7 @@ vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m1(op1, op2, vl); + return __riscv_vadd_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m2( @@ -480,7 +480,7 @@ vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vadd_vv_u8m2(op1, op2, vl); + return __riscv_vadd_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m2( @@ -489,7 +489,7 @@ vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m2(op1, op2, vl); + return __riscv_vadd_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m4( @@ -498,7 +498,7 @@ vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vadd_vv_u8m4(op1, op2, vl); + return __riscv_vadd_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m4( @@ -507,7 +507,7 @@ vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m4(op1, op2, vl); + return __riscv_vadd_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m8( @@ -516,7 +516,7 @@ vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vadd_vv_u8m8(op1, op2, vl); + return __riscv_vadd_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m8( @@ -525,7 +525,7 @@ vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m8(op1, op2, vl); + return __riscv_vadd_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4( @@ -534,7 +534,7 @@ vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vadd_vv_u16mf4(op1, op2, vl); + return __riscv_vadd_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4( @@ -543,7 +543,7 @@ vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf4(op1, op2, vl); + return __riscv_vadd_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2( @@ -552,7 +552,7 @@ vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vadd_vv_u16mf2(op1, op2, vl); + return __riscv_vadd_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2( @@ -561,7 +561,7 @@ vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf2(op1, op2, vl); + return __riscv_vadd_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m1( @@ -570,7 +570,7 @@ vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vadd_vv_u16m1(op1, op2, vl); + return __riscv_vadd_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m1( @@ -579,7 +579,7 @@ vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m1(op1, op2, vl); + return __riscv_vadd_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m2( @@ -588,7 +588,7 @@ vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, 
uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vadd_vv_u16m2(op1, op2, vl); + return __riscv_vadd_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m2( @@ -597,7 +597,7 @@ vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m2(op1, op2, vl); + return __riscv_vadd_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m4( @@ -606,7 +606,7 @@ vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vadd_vv_u16m4(op1, op2, vl); + return __riscv_vadd_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m4( @@ -615,7 +615,7 @@ vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m4(op1, op2, vl); + return __riscv_vadd_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m8( @@ -624,7 +624,7 @@ vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vadd_vv_u16m8(op1, op2, vl); + return __riscv_vadd_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m8( @@ -633,7 +633,7 @@ vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m8(op1, op2, vl); + return __riscv_vadd_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2( @@ -642,7 +642,7 @@ vuint16m8_t 
test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vadd_vv_u32mf2(op1, op2, vl); + return __riscv_vadd_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2( @@ -651,7 +651,7 @@ vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32mf2(op1, op2, vl); + return __riscv_vadd_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m1( @@ -660,7 +660,7 @@ vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vadd_vv_u32m1(op1, op2, vl); + return __riscv_vadd_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m1( @@ -669,7 +669,7 @@ vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m1(op1, op2, vl); + return __riscv_vadd_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m2( @@ -678,7 +678,7 @@ vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vadd_vv_u32m2(op1, op2, vl); + return __riscv_vadd_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m2( @@ -687,7 +687,7 @@ vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m2(op1, op2, vl); + return __riscv_vadd_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vadd_vv_u32m4( @@ -696,7 +696,7 @@ vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vadd_vv_u32m4(op1, op2, vl); + return __riscv_vadd_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m4( @@ -705,7 +705,7 @@ vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m4(op1, op2, vl); + return __riscv_vadd_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m8( @@ -714,7 +714,7 @@ vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vadd_vv_u32m8(op1, op2, vl); + return __riscv_vadd_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m8( @@ -723,7 +723,7 @@ vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m8(op1, op2, vl); + return __riscv_vadd_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m1( @@ -732,7 +732,7 @@ vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vadd_vv_u64m1(op1, op2, vl); + return __riscv_vadd_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m1( @@ -741,7 +741,7 @@ vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m1(op1, op2, vl); + return __riscv_vadd_vx_u64m1(op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vadd_vv_u64m2( @@ -750,7 +750,7 @@ vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vadd_vv_u64m2(op1, op2, vl); + return __riscv_vadd_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m2( @@ -759,7 +759,7 @@ vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m2(op1, op2, vl); + return __riscv_vadd_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m4( @@ -768,7 +768,7 @@ vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vadd_vv_u64m4(op1, op2, vl); + return __riscv_vadd_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m4( @@ -777,7 +777,7 @@ vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m4(op1, op2, vl); + return __riscv_vadd_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m8( @@ -786,7 +786,7 @@ vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vadd_vv_u64m8(op1, op2, vl); + return __riscv_vadd_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m8( @@ -795,7 +795,7 @@ vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m8(op1, op2, vl); + return 
__riscv_vadd_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m( @@ -804,7 +804,7 @@ vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vadd_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m( @@ -813,7 +813,7 @@ vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m( @@ -822,7 +822,7 @@ vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vadd_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m( @@ -831,7 +831,7 @@ vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m( @@ -840,7 +840,7 @@ vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vadd_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m( @@ -849,7 +849,7 @@ vint8mf2_t 
test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m( @@ -858,7 +858,7 @@ vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vadd_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m( @@ -867,7 +867,7 @@ vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m( @@ -876,7 +876,7 @@ vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vadd_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m( @@ -885,7 +885,7 @@ vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m( @@ -894,7 +894,7 @@ vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t 
op1, vint8m4_t op2, size_t vl) { - return vadd_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m( @@ -903,7 +903,7 @@ vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m( @@ -912,7 +912,7 @@ vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vadd_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m( @@ -921,7 +921,7 @@ vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m( @@ -930,7 +930,7 @@ vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vadd_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m( @@ -939,7 +939,7 @@ vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i16mf4_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m( @@ -948,7 +948,7 @@ vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vadd_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m( @@ -957,7 +957,7 @@ vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m( @@ -966,7 +966,7 @@ vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vadd_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m( @@ -975,7 +975,7 @@ vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m( @@ -984,7 +984,7 @@ vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vadd_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m( @@ -993,7 +993,7 @@ vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, 
vint16m2_t op1, vint16m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m( @@ -1002,7 +1002,7 @@ vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vadd_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m( @@ -1011,7 +1011,7 @@ vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m( @@ -1020,7 +1020,7 @@ vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vadd_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m( @@ -1029,7 +1029,7 @@ vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m( @@ -1038,7 +1038,7 @@ vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, 
vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vadd_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vadd_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m( @@ -1065,7 +1065,7 @@ vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m( @@ -1074,7 +1074,7 @@ vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vadd_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m( @@ -1083,7 +1083,7 @@ vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m2_m(mask, op1, op2, vl); + return 
__riscv_vadd_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m( @@ -1092,7 +1092,7 @@ vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vadd_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m( @@ -1101,7 +1101,7 @@ vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m( @@ -1110,7 +1110,7 @@ vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vadd_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m( @@ -1119,7 +1119,7 @@ vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m( @@ -1128,7 +1128,7 @@ vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vadd_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m( @@ -1137,7 +1137,7 @@ 
vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m( @@ -1146,7 +1146,7 @@ vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vadd_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m( @@ -1155,7 +1155,7 @@ vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m( @@ -1164,7 +1164,7 @@ vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vadd_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m( @@ -1173,7 +1173,7 @@ vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m( @@ -1182,7 +1182,7 @@ vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vadd_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vadd_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m( @@ -1191,7 +1191,7 @@ vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vadd_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m( @@ -1200,7 +1200,7 @@ vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vadd_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vadd_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return 
vadd_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vadd_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vadd_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vadd_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vadd_vx_u8m2_m( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vadd_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vadd_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vadd_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vadd_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vadd_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t 
mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vadd_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vadd_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vadd_vv_u16m8_m(mask, op1, op2, vl); + return 
__riscv_vadd_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vadd_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vadd_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m( 
@@ -1470,7 +1470,7 @@ vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vadd_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vadd_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vadd_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vadd_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vadd_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, 
vuint64m4_t op2, size_t vl) { - return vadd_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vadd_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vadd_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vadd_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vand.c index 88f0ddef6d16..8ca368819414 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vand.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vand_vv_i8mf8(op1, op2, vl); + return __riscv_vand_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t 
op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf8(op1, op2, vl); + return __riscv_vand_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vand_vv_i8mf4(op1, op2, vl); + return __riscv_vand_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf4(op1, op2, vl); + return __riscv_vand_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vand_vv_i8mf2(op1, op2, vl); + return __riscv_vand_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf2(op1, op2, vl); + return __riscv_vand_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vand_vv_i8m1(op1, op2, vl); + return __riscv_vand_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m1(op1, op2, vl); + return __riscv_vand_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vand_vv_i8m2(op1, op2, vl); + return __riscv_vand_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m2(op1, op2, vl); + return __riscv_vand_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vand_vv_i8m4(op1, op2, vl); + return __riscv_vand_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m4(op1, op2, vl); + return __riscv_vand_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vand_vv_i8m8(op1, op2, vl); + return __riscv_vand_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t 
op2, size_t vl) { - return vand_vx_i8m8(op1, op2, vl); + return __riscv_vand_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vand_vv_i16mf4(op1, op2, vl); + return __riscv_vand_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf4(op1, op2, vl); + return __riscv_vand_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vand_vv_i16mf2(op1, op2, vl); + return __riscv_vand_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf2(op1, op2, vl); + return __riscv_vand_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vand_vv_i16m1(op1, op2, vl); + return __riscv_vand_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m1(op1, op2, vl); + return __riscv_vand_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vand_vv_i16m2(op1, op2, vl); + return __riscv_vand_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m2(op1, op2, vl); + return __riscv_vand_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vand_vv_i16m4(op1, op2, vl); + return __riscv_vand_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m4(op1, op2, vl); + return __riscv_vand_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vand_vv_i16m8(op1, op2, vl); + return __riscv_vand_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m8(op1, op2, vl); + return __riscv_vand_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vand_vv_i32mf2(op1, op2, vl); + return __riscv_vand_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32mf2(op1, op2, vl); + return __riscv_vand_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vand_vv_i32m1(op1, op2, vl); + return __riscv_vand_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m1(op1, op2, vl); + return __riscv_vand_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vand_vv_i32m2(op1, op2, vl); + return __riscv_vand_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m2(op1, op2, vl); + return __riscv_vand_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vand_vv_i32m4(op1, op2, vl); + return __riscv_vand_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m4(op1, op2, vl); + return __riscv_vand_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vand_vv_i32m8(op1, op2, vl); + return __riscv_vand_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m8(op1, op2, vl); + return __riscv_vand_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vand_vv_i64m1(op1, op2, vl); + return __riscv_vand_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m1(op1, op2, vl); + return __riscv_vand_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vand_vv_i64m2(op1, op2, vl); + return __riscv_vand_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m2(op1, op2, vl); + return __riscv_vand_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vand_vv_i64m4(op1, op2, vl); + return __riscv_vand_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m4(op1, op2, vl); + return __riscv_vand_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vand_vv_i64m8(op1, op2, vl); + return __riscv_vand_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m8(op1, op2, vl); + return __riscv_vand_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf8( @@ -408,7 +408,7 @@ vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vand_vv_u8mf8(op1, op2, vl); + return __riscv_vand_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf8( @@ -417,7 +417,7 @@ vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf8(op1, op2, vl); + return __riscv_vand_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf4( @@ -426,7 +426,7 @@ vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vand_vv_u8mf4(op1, op2, vl); + return __riscv_vand_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf4( @@ -435,7 +435,7 @@ vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf4(op1, op2, vl); + return __riscv_vand_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf2( @@ -444,7 +444,7 @@ vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vand_vv_u8mf2(op1, op2, vl); + return __riscv_vand_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf2( @@ -453,7 +453,7 @@ vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf2(op1, op2, vl); + return __riscv_vand_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m1( @@ -462,7 +462,7 @@ vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vand_vv_u8m1(op1, op2, vl); + return __riscv_vand_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m1( @@ -471,7 +471,7 @@ vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m1(op1, op2, vl); + return __riscv_vand_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m2( @@ -480,7 +480,7 @@ vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vand_vv_u8m2(op1, op2, vl); + return __riscv_vand_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m2( @@ -489,7 +489,7 @@ vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m2(op1, op2, vl); + return __riscv_vand_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m4( @@ -498,7 +498,7 @@ vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vand_vv_u8m4(op1, op2, vl); + return __riscv_vand_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m4( @@ -507,7 +507,7 @@ vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m4(op1, op2, vl); + return __riscv_vand_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m8( @@ -516,7 +516,7 @@ vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vand_vv_u8m8(op1, op2, vl); + return __riscv_vand_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m8( @@ -525,7 +525,7 @@ vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m8(op1, op2, vl); + return __riscv_vand_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf4( @@ -534,7 +534,7 @@ vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vand_vv_u16mf4(op1, op2, vl); + return __riscv_vand_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf4( @@ -543,7 +543,7 @@ vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf4(op1, op2, vl); + return __riscv_vand_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf2( @@ -552,7 +552,7 @@ vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vand_vv_u16mf2(op1, op2, vl); + return __riscv_vand_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf2( @@ -561,7 +561,7 @@ vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf2(op1, op2, vl); + return __riscv_vand_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m1( @@ -570,7 +570,7 @@ vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vand_vv_u16m1(op1, op2, vl); + return __riscv_vand_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m1( @@ -579,7 +579,7 @@ vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m1(op1, op2, vl); + return __riscv_vand_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m2( @@ -588,7 +588,7 @@ vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vand_vv_u16m2(op1, op2, vl); + return __riscv_vand_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m2( @@ -597,7 +597,7 @@ vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m2(op1, op2, vl); + return __riscv_vand_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m4( @@ -606,7 +606,7 @@ vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vand_vv_u16m4(op1, op2, vl); + return __riscv_vand_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m4( @@ -615,7 +615,7 @@ vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t 
vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m4(op1, op2, vl); + return __riscv_vand_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m8( @@ -624,7 +624,7 @@ vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vand_vv_u16m8(op1, op2, vl); + return __riscv_vand_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m8( @@ -633,7 +633,7 @@ vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m8(op1, op2, vl); + return __riscv_vand_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32mf2( @@ -642,7 +642,7 @@ vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vand_vv_u32mf2(op1, op2, vl); + return __riscv_vand_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32mf2( @@ -651,7 +651,7 @@ vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32mf2(op1, op2, vl); + return __riscv_vand_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m1( @@ -660,7 +660,7 @@ vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vand_vv_u32m1(op1, op2, vl); + return __riscv_vand_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m1( @@ -669,7 +669,7 @@ vuint32m1_t 
test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m1(op1, op2, vl); + return __riscv_vand_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m2( @@ -678,7 +678,7 @@ vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vand_vv_u32m2(op1, op2, vl); + return __riscv_vand_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m2( @@ -687,7 +687,7 @@ vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m2(op1, op2, vl); + return __riscv_vand_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m4( @@ -696,7 +696,7 @@ vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vand_vv_u32m4(op1, op2, vl); + return __riscv_vand_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m4( @@ -705,7 +705,7 @@ vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m4(op1, op2, vl); + return __riscv_vand_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m8( @@ -714,7 +714,7 @@ vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vand_vv_u32m8(op1, op2, vl); + return __riscv_vand_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m8( @@ -723,7 
+723,7 @@ vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m8(op1, op2, vl); + return __riscv_vand_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m1( @@ -732,7 +732,7 @@ vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vand_vv_u64m1(op1, op2, vl); + return __riscv_vand_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m1( @@ -741,7 +741,7 @@ vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m1(op1, op2, vl); + return __riscv_vand_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m2( @@ -750,7 +750,7 @@ vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vand_vv_u64m2(op1, op2, vl); + return __riscv_vand_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m2( @@ -759,7 +759,7 @@ vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m2(op1, op2, vl); + return __riscv_vand_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m4( @@ -768,7 +768,7 @@ vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vand_vv_u64m4(op1, op2, vl); + return __riscv_vand_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vand_vx_u64m4( @@ -777,7 +777,7 @@ vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m4(op1, op2, vl); + return __riscv_vand_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m8( @@ -786,7 +786,7 @@ vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vand_vv_u64m8(op1, op2, vl); + return __riscv_vand_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m8( @@ -795,7 +795,7 @@ vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m8(op1, op2, vl); + return __riscv_vand_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m( @@ -804,7 +804,7 @@ vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vand_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vand_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m( @@ -813,7 +813,7 @@ vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vand_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m( @@ -822,7 +822,7 @@ vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t 
vl) { - return vand_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vand_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m( @@ -831,7 +831,7 @@ vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vand_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m( @@ -840,7 +840,7 @@ vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vand_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vand_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m( @@ -849,7 +849,7 @@ vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vand_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m1_m( @@ -858,7 +858,7 @@ vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vand_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vand_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m1_m( @@ -867,7 +867,7 @@ vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vand_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vand_vv_i8m2_m( @@ -876,7 +876,7 @@ vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vand_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vand_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m2_m( @@ -885,7 +885,7 @@ vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vand_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m4_m( @@ -894,7 +894,7 @@ vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vand_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vand_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m4_m( @@ -903,7 +903,7 @@ vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vand_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m8_m( @@ -912,7 +912,7 @@ vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vand_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vand_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m8_m( @@ -921,7 +921,7 @@ vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vand_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m( @@ -930,7 +930,7 @@ vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vand_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vand_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m( @@ -939,7 +939,7 @@ vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vand_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m( @@ -948,7 +948,7 @@ vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vand_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vand_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m( @@ -957,7 +957,7 @@ vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vand_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m1_m( @@ -966,7 +966,7 @@ vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return 
vand_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vand_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m1_m( @@ -975,7 +975,7 @@ vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vand_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m2_m( @@ -984,7 +984,7 @@ vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vand_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vand_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m2_m( @@ -993,7 +993,7 @@ vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vand_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m4_m( @@ -1002,7 +1002,7 @@ vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vand_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vand_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m4_m( @@ -1011,7 +1011,7 @@ vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vand_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vand_vv_i16m8_m( @@ -1020,7 +1020,7 @@ vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vand_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vand_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m8_m( @@ -1029,7 +1029,7 @@ vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vand_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m( @@ -1038,7 +1038,7 @@ vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vand_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vand_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vand_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m1_m( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vand_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vand_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m1_m( @@ -1065,7 +1065,7 @@ vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t 
op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vand_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m2_m( @@ -1074,7 +1074,7 @@ vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vand_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vand_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m2_m( @@ -1083,7 +1083,7 @@ vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vand_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m4_m( @@ -1092,7 +1092,7 @@ vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vand_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vand_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m4_m( @@ -1101,7 +1101,7 @@ vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vand_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m8_m( @@ -1110,7 +1110,7 @@ vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, 
vint32m8_t op2, size_t vl) { - return vand_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vand_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m8_m( @@ -1119,7 +1119,7 @@ vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vand_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m1_m( @@ -1128,7 +1128,7 @@ vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vand_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vand_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m1_m( @@ -1137,7 +1137,7 @@ vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vand_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m2_m( @@ -1146,7 +1146,7 @@ vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vand_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vand_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m2_m( @@ -1155,7 +1155,7 @@ vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vand_vx_i64m2_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m4_m( @@ -1164,7 +1164,7 @@ vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vand_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vand_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m4_m( @@ -1173,7 +1173,7 @@ vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vand_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m8_m( @@ -1182,7 +1182,7 @@ vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vand_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vand_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m8_m( @@ -1191,7 +1191,7 @@ vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vand_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m( @@ -1200,7 +1200,7 @@ vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vand_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vand_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m( @@ -1209,7 +1209,7 @@ vuint8mf8_t 
test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vand_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vand_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vand_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vand_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vand_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vand_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vand_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m1_m( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vand_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vand_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m1_m( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vand_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m2_m( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vand_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vand_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m2_m( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vand_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m4_m( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vand_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vand_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m4_m( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m4_m(mask, op1, op2, vl); + 
return __riscv_vand_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m8_m( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vand_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vand_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m8_m( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vand_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vand_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vand_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vand_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vand_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vand_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vand_vx_u16mf2_m( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vand_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m1_m( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vand_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vand_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m1_m( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vand_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m2_m( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vand_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vand_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m2_m( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vand_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m4_m( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, 
vuint16m2_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vand_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vand_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m4_m( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vand_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m8_m( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vand_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vand_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m8_m( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vand_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vand_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vand_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vand_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m1_m( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vand_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vand_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m1_m( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vand_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m2_m( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vand_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vand_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m2_m( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vand_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m4_m( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return 
vand_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vand_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m4_m( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vand_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m8_m( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vand_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vand_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m8_m( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vand_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m1_m( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vand_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vand_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m1_m( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vand_vx_u64m1_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vand_vv_u64m2_m( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vand_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vand_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m2_m( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vand_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m4_m( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vand_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vand_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m4_m( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vand_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m8_m( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vand_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vand_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m8_m( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vand_vv_u64m8_m(vbool8_t 
mask, vuint64m8_t op1, vuint64m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vand_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasub.c index 767892167df9..0c4af21305fd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasub.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vasub_vv_i8mf8(op1, op2, vl); + return __riscv_vasub_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf8(op1, op2, vl); + return __riscv_vasub_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vasub_vv_i8mf4(op1, op2, vl); + return __riscv_vasub_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf4(op1, op2, vl); + return __riscv_vasub_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t 
test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vasub_vv_i8mf2(op1, op2, vl); + return __riscv_vasub_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf2(op1, op2, vl); + return __riscv_vasub_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vasub_vv_i8m1(op1, op2, vl); + return __riscv_vasub_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m1(op1, op2, vl); + return __riscv_vasub_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vasub_vv_i8m2(op1, op2, vl); + return __riscv_vasub_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m2(op1, op2, vl); + return __riscv_vasub_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, 
int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vasub_vv_i8m4(op1, op2, vl); + return __riscv_vasub_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m4(op1, op2, vl); + return __riscv_vasub_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vasub_vv_i8m8(op1, op2, vl); + return __riscv_vasub_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m8(op1, op2, vl); + return __riscv_vasub_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vasub_vv_i16mf4(op1, op2, vl); + return __riscv_vasub_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf4(op1, op2, vl); + return __riscv_vasub_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, 
int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vasub_vv_i16mf2(op1, op2, vl); + return __riscv_vasub_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf2(op1, op2, vl); + return __riscv_vasub_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vasub_vv_i16m1(op1, op2, vl); + return __riscv_vasub_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m1(op1, op2, vl); + return __riscv_vasub_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vasub_vv_i16m2(op1, op2, vl); + return __riscv_vasub_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m2(op1, op2, vl); + return __riscv_vasub_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t 
test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vasub_vv_i16m4(op1, op2, vl); + return __riscv_vasub_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m4(op1, op2, vl); + return __riscv_vasub_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vasub_vv_i16m8(op1, op2, vl); + return __riscv_vasub_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m8(op1, op2, vl); + return __riscv_vasub_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vasub_vv_i32mf2(op1, op2, vl); + return __riscv_vasub_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32mf2(op1, op2, vl); + return __riscv_vasub_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vasub_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vasub_vv_i32m1(op1, op2, vl); + return __riscv_vasub_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m1(op1, op2, vl); + return __riscv_vasub_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vasub_vv_i32m2(op1, op2, vl); + return __riscv_vasub_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m2(op1, op2, vl); + return __riscv_vasub_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vasub_vv_i32m4(op1, op2, vl); + return __riscv_vasub_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m4(op1, op2, vl); + return __riscv_vasub_vx_i32m4(op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vasub_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vasub_vv_i32m8(op1, op2, vl); + return __riscv_vasub_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m8(op1, op2, vl); + return __riscv_vasub_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vasub_vv_i64m1(op1, op2, vl); + return __riscv_vasub_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m1(op1, op2, vl); + return __riscv_vasub_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vasub_vv_i64m2(op1, op2, vl); + return __riscv_vasub_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m2(op1, op2, vl); + return 
__riscv_vasub_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vasub_vv_i64m4(op1, op2, vl); + return __riscv_vasub_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m4(op1, op2, vl); + return __riscv_vasub_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vasub_vv_i64m8(op1, op2, vl); + return __riscv_vasub_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m8(op1, op2, vl); + return __riscv_vasub_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m( @@ -408,7 +408,7 @@ vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vasub_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t 
op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m( @@ -426,7 +426,7 @@ vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vasub_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m( @@ -435,7 +435,7 @@ vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m( @@ -444,7 +444,7 @@ vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vasub_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m( @@ -453,7 +453,7 @@ vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m( @@ -462,7 +462,7 @@ vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vasub_vv_i8m1_m(mask, op1, op2, vl); + return 
__riscv_vasub_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m( @@ -480,7 +480,7 @@ vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vasub_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m( @@ -489,7 +489,7 @@ vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m( @@ -498,7 +498,7 @@ vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vasub_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m( @@ -507,7 +507,7 @@ vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m( @@ -516,7 +516,7 @@ vint8m4_t test_vasub_vx_i8m4_m(vbool2_t 
mask, vint8m4_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vasub_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m( @@ -525,7 +525,7 @@ vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m( @@ -534,7 +534,7 @@ vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vasub_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m( @@ -543,7 +543,7 @@ vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m( @@ -552,7 +552,7 @@ vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vasub_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m( @@ -561,7 +561,7 @@ vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m( @@ -570,7 +570,7 @@ vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vasub_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m( @@ -579,7 +579,7 @@ vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m( @@ -588,7 +588,7 @@ vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vasub_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m( @@ -597,7 +597,7 @@ vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m( @@ -606,7 +606,7 @@ vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return 
vasub_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m( @@ -615,7 +615,7 @@ vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m( @@ -624,7 +624,7 @@ vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vasub_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m( @@ -633,7 +633,7 @@ vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m( @@ -642,7 +642,7 @@ vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vasub_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m( @@ -651,7 +651,7 @@ vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i32mf2_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m( @@ -660,7 +660,7 @@ vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vasub_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m( @@ -669,7 +669,7 @@ vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m( @@ -678,7 +678,7 @@ vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vasub_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m( @@ -687,7 +687,7 @@ vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m( @@ -696,7 +696,7 @@ vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vasub_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m( @@ -705,7 +705,7 @@ vint32m4_t test_vasub_vv_i32m4_m(vbool8_t 
mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m( @@ -714,7 +714,7 @@ vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vasub_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m( @@ -723,7 +723,7 @@ vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m( @@ -732,7 +732,7 @@ vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vasub_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m( @@ -741,7 +741,7 @@ vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m( @@ -750,7 +750,7 @@ vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vasub_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m( @@ -759,7 +759,7 @@ vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m( @@ -768,7 +768,7 @@ vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vasub_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m( @@ -777,7 +777,7 @@ vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m( @@ -786,7 +786,7 @@ vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vasub_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vasub_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m( @@ -795,6 +795,6 @@ vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return 
vasub_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vasub_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasubu.c index b3cfeed161f1..b031be7d0b67 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vasubu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vasubu_vv_u8mf8(op1, op2, vl); + return __riscv_vasubu_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf8(op1, op2, vl); + return __riscv_vasubu_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vasubu_vv_u8mf4(op1, op2, vl); + return __riscv_vasubu_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4( @@ -39,7 +39,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf4(op1, op2, vl); + return __riscv_vasubu_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2( @@ -48,7 +48,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, 
vuint8mf2_t op2, size_t vl) { - return vasubu_vv_u8mf2(op1, op2, vl); + return __riscv_vasubu_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf2(op1, op2, vl); + return __riscv_vasubu_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vasubu_vv_u8m1(op1, op2, vl); + return __riscv_vasubu_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m1(op1, op2, vl); + return __riscv_vasubu_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2( @@ -84,7 +84,7 @@ vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vasubu_vv_u8m2(op1, op2, vl); + return __riscv_vasubu_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m2(op1, op2, vl); + return __riscv_vasubu_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vasubu_vv_u8m4(op1, op2, vl); + return __riscv_vasubu_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4( @@ -111,7 +111,7 @@ vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m4(op1, op2, vl); + return __riscv_vasubu_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8( @@ -120,7 +120,7 @@ vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vasubu_vv_u8m8(op1, op2, vl); + return __riscv_vasubu_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8( @@ -129,7 +129,7 @@ vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m8(op1, op2, vl); + return __riscv_vasubu_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4( @@ -138,7 +138,7 @@ vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vasubu_vv_u16mf4(op1, op2, vl); + return __riscv_vasubu_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4( @@ -147,7 +147,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf4(op1, op2, vl); + return __riscv_vasubu_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2( @@ -156,7 +156,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vasubu_vv_u16mf2(op1, op2, vl); + return __riscv_vasubu_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2( @@ -165,7 +165,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf2(op1, op2, vl); + return __riscv_vasubu_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1( @@ -174,7 +174,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vasubu_vv_u16m1(op1, op2, vl); + return __riscv_vasubu_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1( @@ -183,7 +183,7 @@ vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m1(op1, op2, vl); + return __riscv_vasubu_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2( @@ -192,7 +192,7 @@ vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vasubu_vv_u16m2(op1, op2, vl); + return __riscv_vasubu_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2( @@ -201,7 +201,7 @@ vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m2(op1, op2, vl); + return __riscv_vasubu_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vasubu_vv_u16m4( @@ -210,7 +210,7 @@ vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vasubu_vv_u16m4(op1, op2, vl); + return __riscv_vasubu_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4( @@ -219,7 +219,7 @@ vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m4(op1, op2, vl); + return __riscv_vasubu_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8( @@ -228,7 +228,7 @@ vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vasubu_vv_u16m8(op1, op2, vl); + return __riscv_vasubu_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8( @@ -237,7 +237,7 @@ vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m8(op1, op2, vl); + return __riscv_vasubu_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2( @@ -246,7 +246,7 @@ vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vasubu_vv_u32mf2(op1, op2, vl); + return __riscv_vasubu_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return 
vasubu_vx_u32mf2(op1, op2, vl); + return __riscv_vasubu_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vasubu_vv_u32m1(op1, op2, vl); + return __riscv_vasubu_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m1(op1, op2, vl); + return __riscv_vasubu_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2( @@ -282,7 +282,7 @@ vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vasubu_vv_u32m2(op1, op2, vl); + return __riscv_vasubu_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2( @@ -291,7 +291,7 @@ vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m2(op1, op2, vl); + return __riscv_vasubu_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4( @@ -300,7 +300,7 @@ vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vasubu_vv_u32m4(op1, op2, vl); + return __riscv_vasubu_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4( @@ -309,7 +309,7 @@ vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m4(op1, op2, vl); + return __riscv_vasubu_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8( @@ -318,7 +318,7 @@ vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vasubu_vv_u32m8(op1, op2, vl); + return __riscv_vasubu_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8( @@ -327,7 +327,7 @@ vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m8(op1, op2, vl); + return __riscv_vasubu_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1( @@ -336,7 +336,7 @@ vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vasubu_vv_u64m1(op1, op2, vl); + return __riscv_vasubu_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1( @@ -345,7 +345,7 @@ vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m1(op1, op2, vl); + return __riscv_vasubu_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2( @@ -354,7 +354,7 @@ vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vasubu_vv_u64m2(op1, op2, vl); + return __riscv_vasubu_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2( @@ -363,7 +363,7 @@ vuint64m2_t 
test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m2(op1, op2, vl); + return __riscv_vasubu_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4( @@ -372,7 +372,7 @@ vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vasubu_vv_u64m4(op1, op2, vl); + return __riscv_vasubu_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4( @@ -381,7 +381,7 @@ vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m4(op1, op2, vl); + return __riscv_vasubu_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8( @@ -390,7 +390,7 @@ vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vasubu_vv_u64m8(op1, op2, vl); + return __riscv_vasubu_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m8(op1, op2, vl); + return __riscv_vasubu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vasubu_vv_u8mf8_m(mask, op1, op2, vl); + return 
__riscv_vasubu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m( @@ -417,7 +417,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m( @@ -426,7 +426,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vasubu_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m( @@ -435,7 +435,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m( @@ -444,7 +444,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vasubu_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m( @@ -453,7 +453,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vasubu_vv_u8m1_m( @@ -462,7 +462,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vasubu_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m( @@ -471,7 +471,7 @@ vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m( @@ -480,7 +480,7 @@ vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vasubu_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m( @@ -489,7 +489,7 @@ vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m( @@ -498,7 +498,7 @@ vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vasubu_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m( @@ -507,7 +507,7 @@ vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, 
vuint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m( @@ -516,7 +516,7 @@ vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vasubu_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m( @@ -525,7 +525,7 @@ vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m( @@ -534,7 +534,7 @@ vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vasubu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m( @@ -543,7 +543,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m( @@ -552,7 +552,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vasubu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m( @@ -561,7 +561,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m( @@ -570,7 +570,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vasubu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m( @@ -579,7 +579,7 @@ vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m( @@ -588,7 +588,7 @@ vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vasubu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m( @@ -597,7 +597,7 @@ vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, 
uint16_t op2, size_t vl) { - return vasubu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m( @@ -606,7 +606,7 @@ vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vasubu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m( @@ -615,7 +615,7 @@ vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m( @@ -624,7 +624,7 @@ vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vasubu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m( @@ -633,7 +633,7 @@ vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m( @@ -642,7 +642,7 @@ vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vasubu_vv_u32mf2_m(mask, op1, 
op2, vl); + return __riscv_vasubu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m( @@ -651,7 +651,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m( @@ -660,7 +660,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vasubu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m( @@ -669,7 +669,7 @@ vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m( @@ -678,7 +678,7 @@ vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vasubu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m( @@ -687,7 +687,7 @@ vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u32m2_m(mask, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m( @@ -696,7 +696,7 @@ vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vasubu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m( @@ -705,7 +705,7 @@ vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m( @@ -714,7 +714,7 @@ vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vasubu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m( @@ -723,7 +723,7 @@ vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m( @@ -732,7 +732,7 @@ vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vasubu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m( @@ -741,7 +741,7 @@ 
vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m( @@ -750,7 +750,7 @@ vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vasubu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m( @@ -759,7 +759,7 @@ vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m( @@ -768,7 +768,7 @@ vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vasubu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m( @@ -777,7 +777,7 @@ vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m( @@ -786,7 +786,7 @@ vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t 
op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vasubu_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vasubu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vasubu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c index 8c53b7e94687..34438cef9d90 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_f16mf4(src, mask, vl); + return __riscv_vcompress_vm_f16mf4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t src, vbool64_t mask, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_f16mf2(src, mask, vl); + return __riscv_vcompress_vm_f16mf2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t src, vbool32_t mask, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_f16m1(src, mask, vl); + return 
__riscv_vcompress_vm_f16m1(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t src, vbool16_t mask, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_f16m2(src, mask, vl); + return __riscv_vcompress_vm_f16m2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t src, vbool8_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_f16m4(src, mask, vl); + return __riscv_vcompress_vm_f16m4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t src, vbool4_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t src, vbool2_t mask, size_t vl) { - return vcompress_vm_f16m8(src, mask, vl); + return __riscv_vcompress_vm_f16m8(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t src, vbool2_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_f32mf2(src, mask, vl); + return __riscv_vcompress_vm_f32mf2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t src, vbool64_t mask, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_f32m1(src, mask, vl); + return __riscv_vcompress_vm_f32m1(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t src, vbool32_t 
mask, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_f32m2(src, mask, vl); + return __riscv_vcompress_vm_f32m2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t src, vbool16_t mask, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_f32m4(src, mask, vl); + return __riscv_vcompress_vm_f32m4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t src, vbool8_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_f32m8(src, mask, vl); + return __riscv_vcompress_vm_f32m8(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t src, vbool4_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_f64m1(src, mask, vl); + return __riscv_vcompress_vm_f64m1(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t src, vbool64_t mask, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_f64m2(src, mask, vl); + return __riscv_vcompress_vm_f64m2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t src, vbool32_t mask, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_f64m4(src, 
mask, vl); + return __riscv_vcompress_vm_f64m4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t src, vbool16_t mask, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_f64m8(src, mask, vl); + return __riscv_vcompress_vm_f64m8(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8( @@ -148,7 +148,7 @@ vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t src, vbool8_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_i8mf8(src, mask, vl); + return __riscv_vcompress_vm_i8mf8(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4( @@ -157,7 +157,7 @@ vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t src, vbool64_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_i8mf4(src, mask, vl); + return __riscv_vcompress_vm_i8mf4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2( @@ -166,7 +166,7 @@ vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t src, vbool32_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_i8mf2(src, mask, vl); + return __riscv_vcompress_vm_i8mf2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1( @@ -175,7 +175,7 @@ vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t src, vbool16_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vcompress_vm_i8m1(vint8m1_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_i8m1(src, mask, vl); + return __riscv_vcompress_vm_i8m1(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2( @@ -184,7 +184,7 @@ vint8m1_t test_vcompress_vm_i8m1(vint8m1_t src, vbool8_t mask, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vcompress_vm_i8m2(vint8m2_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_i8m2(src, mask, vl); + return __riscv_vcompress_vm_i8m2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4( @@ -193,7 +193,7 @@ vint8m2_t test_vcompress_vm_i8m2(vint8m2_t src, vbool4_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vcompress_vm_i8m4(vint8m4_t src, vbool2_t mask, size_t vl) { - return vcompress_vm_i8m4(src, mask, vl); + return __riscv_vcompress_vm_i8m4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8( @@ -202,7 +202,7 @@ vint8m4_t test_vcompress_vm_i8m4(vint8m4_t src, vbool2_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vcompress_vm_i8m8(vint8m8_t src, vbool1_t mask, size_t vl) { - return vcompress_vm_i8m8(src, mask, vl); + return __riscv_vcompress_vm_i8m8(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4( @@ -211,7 +211,7 @@ vint8m8_t test_vcompress_vm_i8m8(vint8m8_t src, vbool1_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_i16mf4(src, mask, vl); + return __riscv_vcompress_vm_i16mf4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2( @@ -220,7 +220,7 @@ vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t src, vbool64_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_i16mf2(src, mask, vl); + return __riscv_vcompress_vm_i16mf2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1( @@ -229,7 +229,7 @@ vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t src, vbool32_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vcompress_vm_i16m1(vint16m1_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_i16m1(src, mask, vl); + return 
__riscv_vcompress_vm_i16m1(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2( @@ -238,7 +238,7 @@ vint16m1_t test_vcompress_vm_i16m1(vint16m1_t src, vbool16_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vcompress_vm_i16m2(vint16m2_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_i16m2(src, mask, vl); + return __riscv_vcompress_vm_i16m2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4( @@ -247,7 +247,7 @@ vint16m2_t test_vcompress_vm_i16m2(vint16m2_t src, vbool8_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vcompress_vm_i16m4(vint16m4_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_i16m4(src, mask, vl); + return __riscv_vcompress_vm_i16m4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8( @@ -256,7 +256,7 @@ vint16m4_t test_vcompress_vm_i16m4(vint16m4_t src, vbool4_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vcompress_vm_i16m8(vint16m8_t src, vbool2_t mask, size_t vl) { - return vcompress_vm_i16m8(src, mask, vl); + return __riscv_vcompress_vm_i16m8(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2( @@ -265,7 +265,7 @@ vint16m8_t test_vcompress_vm_i16m8(vint16m8_t src, vbool2_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_i32mf2(src, mask, vl); + return __riscv_vcompress_vm_i32mf2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1( @@ -274,7 +274,7 @@ vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t src, vbool64_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vcompress_vm_i32m1(vint32m1_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_i32m1(src, mask, vl); + return __riscv_vcompress_vm_i32m1(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2( @@ -283,7 +283,7 @@ vint32m1_t test_vcompress_vm_i32m1(vint32m1_t src, vbool32_t mask, size_t vl) { 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vcompress_vm_i32m2(vint32m2_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_i32m2(src, mask, vl); + return __riscv_vcompress_vm_i32m2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4( @@ -292,7 +292,7 @@ vint32m2_t test_vcompress_vm_i32m2(vint32m2_t src, vbool16_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vcompress_vm_i32m4(vint32m4_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_i32m4(src, mask, vl); + return __riscv_vcompress_vm_i32m4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8( @@ -301,7 +301,7 @@ vint32m4_t test_vcompress_vm_i32m4(vint32m4_t src, vbool8_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vcompress_vm_i32m8(vint32m8_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_i32m8(src, mask, vl); + return __riscv_vcompress_vm_i32m8(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1( @@ -310,7 +310,7 @@ vint32m8_t test_vcompress_vm_i32m8(vint32m8_t src, vbool4_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vcompress_vm_i64m1(vint64m1_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_i64m1(src, mask, vl); + return __riscv_vcompress_vm_i64m1(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2( @@ -319,7 +319,7 @@ vint64m1_t test_vcompress_vm_i64m1(vint64m1_t src, vbool64_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vcompress_vm_i64m2(vint64m2_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_i64m2(src, mask, vl); + return __riscv_vcompress_vm_i64m2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4( @@ -328,7 +328,7 @@ vint64m2_t test_vcompress_vm_i64m2(vint64m2_t src, vbool32_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vcompress_vm_i64m4(vint64m4_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_i64m4(src, mask, vl); + return 
__riscv_vcompress_vm_i64m4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8( @@ -337,7 +337,7 @@ vint64m4_t test_vcompress_vm_i64m4(vint64m4_t src, vbool16_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vcompress_vm_i64m8(vint64m8_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_i64m8(src, mask, vl); + return __riscv_vcompress_vm_i64m8(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8( @@ -346,7 +346,7 @@ vint64m8_t test_vcompress_vm_i64m8(vint64m8_t src, vbool8_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_u8mf8(src, mask, vl); + return __riscv_vcompress_vm_u8mf8(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4( @@ -355,7 +355,7 @@ vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t src, vbool64_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_u8mf4(src, mask, vl); + return __riscv_vcompress_vm_u8mf4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2( @@ -364,7 +364,7 @@ vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t src, vbool32_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_u8mf2(src, mask, vl); + return __riscv_vcompress_vm_u8mf2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1( @@ -373,7 +373,7 @@ vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t src, vbool16_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_u8m1(src, mask, vl); + return __riscv_vcompress_vm_u8m1(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2( @@ -382,7 +382,7 @@ vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t src, vbool8_t mask, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_u8m2(src, mask, vl); + return __riscv_vcompress_vm_u8m2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4( @@ -391,7 +391,7 @@ vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t src, vbool4_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t src, vbool2_t mask, size_t vl) { - return vcompress_vm_u8m4(src, mask, vl); + return __riscv_vcompress_vm_u8m4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8( @@ -400,7 +400,7 @@ vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t src, vbool2_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t src, vbool1_t mask, size_t vl) { - return vcompress_vm_u8m8(src, mask, vl); + return __riscv_vcompress_vm_u8m8(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4( @@ -409,7 +409,7 @@ vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t src, vbool1_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_u16mf4(src, mask, vl); + return __riscv_vcompress_vm_u16mf4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2( @@ -418,7 +418,7 @@ vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t src, vbool64_t mask, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_u16mf2(src, mask, vl); + return __riscv_vcompress_vm_u16mf2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1( @@ -427,7 +427,7 @@ vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t src, vbool32_t mask, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_u16m1(src, mask, vl); + return 
__riscv_vcompress_vm_u16m1(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2( @@ -436,7 +436,7 @@ vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t src, vbool16_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_u16m2(src, mask, vl); + return __riscv_vcompress_vm_u16m2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4( @@ -445,7 +445,7 @@ vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t src, vbool8_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_u16m4(src, mask, vl); + return __riscv_vcompress_vm_u16m4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8( @@ -454,7 +454,7 @@ vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t src, vbool4_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t src, vbool2_t mask, size_t vl) { - return vcompress_vm_u16m8(src, mask, vl); + return __riscv_vcompress_vm_u16m8(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2( @@ -463,7 +463,7 @@ vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t src, vbool2_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_u32mf2(src, mask, vl); + return __riscv_vcompress_vm_u32mf2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1( @@ -472,7 +472,7 @@ vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t src, vbool64_t mask, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_u32m1(src, mask, vl); + return __riscv_vcompress_vm_u32m1(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2( @@ -481,7 +481,7 @@ vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t src, vbool32_t 
mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_u32m2(src, mask, vl); + return __riscv_vcompress_vm_u32m2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4( @@ -490,7 +490,7 @@ vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t src, vbool16_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_u32m4(src, mask, vl); + return __riscv_vcompress_vm_u32m4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8( @@ -499,7 +499,7 @@ vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t src, vbool8_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_u32m8(src, mask, vl); + return __riscv_vcompress_vm_u32m8(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1( @@ -508,7 +508,7 @@ vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t src, vbool4_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_u64m1(src, mask, vl); + return __riscv_vcompress_vm_u64m1(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2( @@ -517,7 +517,7 @@ vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t src, vbool64_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_u64m2(src, mask, vl); + return __riscv_vcompress_vm_u64m2(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4( @@ -526,7 +526,7 @@ vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t src, vbool32_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_u64m4(src, mask, vl); + 
return __riscv_vcompress_vm_u64m4(src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8( @@ -535,6 +535,6 @@ vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t src, vbool16_t mask, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_u64m8(src, mask, vl); + return __riscv_vcompress_vm_u64m8(src, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcpop.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcpop.c index 8b8c22acd1a1..3d856f02ed2d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcpop.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcpop.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vcpop_m_b1(vbool1_t op1, size_t vl) { - return vcpop_m_b1(op1, vl); + return __riscv_vcpop_m_b1(op1, vl); } // CHECK-RV64-LABEL: @test_vcpop_m_b2( @@ -22,7 +22,7 @@ unsigned long test_vcpop_m_b1(vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vcpop_m_b2(vbool2_t op1, size_t vl) { - return vcpop_m_b2(op1, vl); + return __riscv_vcpop_m_b2(op1, vl); } // CHECK-RV64-LABEL: @test_vcpop_m_b4( @@ -31,7 +31,7 @@ unsigned long test_vcpop_m_b2(vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vcpop_m_b4(vbool4_t op1, size_t vl) { - return vcpop_m_b4(op1, vl); + return __riscv_vcpop_m_b4(op1, vl); } // CHECK-RV64-LABEL: @test_vcpop_m_b8( @@ -40,7 +40,7 @@ unsigned long test_vcpop_m_b4(vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vcpop_m_b8(vbool8_t op1, size_t vl) { - return vcpop_m_b8(op1, vl); + return __riscv_vcpop_m_b8(op1, vl); } // CHECK-RV64-LABEL: @test_vcpop_m_b16( @@ -49,7 +49,7 @@ unsigned long test_vcpop_m_b8(vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret 
i64 [[TMP0]] // unsigned long test_vcpop_m_b16(vbool16_t op1, size_t vl) { - return vcpop_m_b16(op1, vl); + return __riscv_vcpop_m_b16(op1, vl); } // CHECK-RV64-LABEL: @test_vcpop_m_b32( @@ -58,7 +58,7 @@ unsigned long test_vcpop_m_b16(vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vcpop_m_b32(vbool32_t op1, size_t vl) { - return vcpop_m_b32(op1, vl); + return __riscv_vcpop_m_b32(op1, vl); } // CHECK-RV64-LABEL: @test_vcpop_m_b64( @@ -67,7 +67,7 @@ unsigned long test_vcpop_m_b32(vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vcpop_m_b64(vbool64_t op1, size_t vl) { - return vcpop_m_b64(op1, vl); + return __riscv_vcpop_m_b64(op1, vl); } // CHECK-RV64-LABEL: @test_vcpop_m_b1_m( @@ -76,7 +76,7 @@ unsigned long test_vcpop_m_b64(vbool64_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vcpop_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return vcpop_m_b1_m(mask, op1, vl); + return __riscv_vcpop_m_b1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vcpop_m_b2_m( @@ -85,7 +85,7 @@ unsigned long test_vcpop_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vcpop_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return vcpop_m_b2_m(mask, op1, vl); + return __riscv_vcpop_m_b2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vcpop_m_b4_m( @@ -94,7 +94,7 @@ unsigned long test_vcpop_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vcpop_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return vcpop_m_b4_m(mask, op1, vl); + return __riscv_vcpop_m_b4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vcpop_m_b8_m( @@ -103,7 +103,7 @@ unsigned long test_vcpop_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vcpop_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return vcpop_m_b8_m(mask, op1, vl); + 
return __riscv_vcpop_m_b8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vcpop_m_b16_m( @@ -112,7 +112,7 @@ unsigned long test_vcpop_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vcpop_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return vcpop_m_b16_m(mask, op1, vl); + return __riscv_vcpop_m_b16_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vcpop_m_b32_m( @@ -121,7 +121,7 @@ unsigned long test_vcpop_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vcpop_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return vcpop_m_b32_m(mask, op1, vl); + return __riscv_vcpop_m_b32_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vcpop_m_b64_m( @@ -130,6 +130,6 @@ unsigned long test_vcpop_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vcpop_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return vcpop_m_b64_m(mask, op1, vl); + return __riscv_vcpop_m_b64_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdiv.c index d392198f14b9..bc668c414b22 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdiv.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vdiv_vv_i8mf8(op1, op2, vl); + return __riscv_vdiv_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf8(op1, op2, vl); + return 
__riscv_vdiv_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vdiv_vv_i8mf4(op1, op2, vl); + return __riscv_vdiv_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf4(op1, op2, vl); + return __riscv_vdiv_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vdiv_vv_i8mf2(op1, op2, vl); + return __riscv_vdiv_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf2(op1, op2, vl); + return __riscv_vdiv_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vdiv_vv_i8m1(op1, op2, vl); + return __riscv_vdiv_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m1(op1, op2, vl); + return __riscv_vdiv_vx_i8m1(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vdiv_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vdiv_vv_i8m2(op1, op2, vl); + return __riscv_vdiv_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m2(op1, op2, vl); + return __riscv_vdiv_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vdiv_vv_i8m4(op1, op2, vl); + return __riscv_vdiv_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m4(op1, op2, vl); + return __riscv_vdiv_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vdiv_vv_i8m8(op1, op2, vl); + return __riscv_vdiv_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m8(op1, op2, vl); + return __riscv_vdiv_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t 
test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vdiv_vv_i16mf4(op1, op2, vl); + return __riscv_vdiv_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf4(op1, op2, vl); + return __riscv_vdiv_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vdiv_vv_i16mf2(op1, op2, vl); + return __riscv_vdiv_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf2(op1, op2, vl); + return __riscv_vdiv_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vdiv_vv_i16m1(op1, op2, vl); + return __riscv_vdiv_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m1(op1, op2, vl); + return __riscv_vdiv_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2( @@ 
-192,7 +192,7 @@ vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vdiv_vv_i16m2(op1, op2, vl); + return __riscv_vdiv_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m2(op1, op2, vl); + return __riscv_vdiv_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vdiv_vv_i16m4(op1, op2, vl); + return __riscv_vdiv_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m4(op1, op2, vl); + return __riscv_vdiv_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vdiv_vv_i16m8(op1, op2, vl); + return __riscv_vdiv_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m8(op1, op2, vl); + return __riscv_vdiv_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2( @@ -246,7 
+246,7 @@ vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vdiv_vv_i32mf2(op1, op2, vl); + return __riscv_vdiv_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32mf2(op1, op2, vl); + return __riscv_vdiv_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vdiv_vv_i32m1(op1, op2, vl); + return __riscv_vdiv_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m1(op1, op2, vl); + return __riscv_vdiv_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vdiv_vv_i32m2(op1, op2, vl); + return __riscv_vdiv_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m2(op1, op2, vl); + return __riscv_vdiv_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4( @@ 
-300,7 +300,7 @@ vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vdiv_vv_i32m4(op1, op2, vl); + return __riscv_vdiv_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m4(op1, op2, vl); + return __riscv_vdiv_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vdiv_vv_i32m8(op1, op2, vl); + return __riscv_vdiv_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m8(op1, op2, vl); + return __riscv_vdiv_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vdiv_vv_i64m1(op1, op2, vl); + return __riscv_vdiv_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m1(op1, op2, vl); + return __riscv_vdiv_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2( @@ -354,7 +354,7 
@@ vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vdiv_vv_i64m2(op1, op2, vl); + return __riscv_vdiv_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m2(op1, op2, vl); + return __riscv_vdiv_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vdiv_vv_i64m4(op1, op2, vl); + return __riscv_vdiv_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m4(op1, op2, vl); + return __riscv_vdiv_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vdiv_vv_i64m8(op1, op2, vl); + return __riscv_vdiv_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m8(op1, op2, vl); + return __riscv_vdiv_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m( @@ -408,7 +408,7 @@ 
vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vdiv_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m( @@ -426,7 +426,7 @@ vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vdiv_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m( @@ -435,7 +435,7 @@ vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m( @@ -444,7 +444,7 @@ vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vdiv_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m( @@ -453,7 +453,7 @@ vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m( @@ -462,7 +462,7 @@ vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vdiv_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m( @@ -480,7 +480,7 @@ vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vdiv_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m( @@ -489,7 +489,7 @@ vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m( @@ -498,7 +498,7 @@ vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vdiv_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i8m4_m(mask, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m( @@ -507,7 +507,7 @@ vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m( @@ -516,7 +516,7 @@ vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vdiv_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m( @@ -525,7 +525,7 @@ vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m( @@ -534,7 +534,7 @@ vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vdiv_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m( @@ -543,7 +543,7 @@ vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m( @@ -552,7 +552,7 @@ vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, 
vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vdiv_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m( @@ -561,7 +561,7 @@ vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m( @@ -570,7 +570,7 @@ vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vdiv_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m( @@ -579,7 +579,7 @@ vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m( @@ -588,7 +588,7 @@ vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vdiv_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m( @@ -597,7 +597,7 @@ vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, 
vint16m2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m( @@ -606,7 +606,7 @@ vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vdiv_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m( @@ -615,7 +615,7 @@ vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m( @@ -624,7 +624,7 @@ vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vdiv_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m( @@ -633,7 +633,7 @@ vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m( @@ -642,7 +642,7 @@ vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vdiv_vv_i32mf2_m(mask, op1, op2, vl); + return 
__riscv_vdiv_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m( @@ -651,7 +651,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m( @@ -660,7 +660,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vdiv_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m( @@ -669,7 +669,7 @@ vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m( @@ -678,7 +678,7 @@ vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vdiv_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m( @@ -687,7 +687,7 @@ vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m( @@ -696,7 +696,7 @@ vint32m2_t 
test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vdiv_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m( @@ -705,7 +705,7 @@ vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m( @@ -714,7 +714,7 @@ vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vdiv_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m( @@ -723,7 +723,7 @@ vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m( @@ -732,7 +732,7 @@ vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vdiv_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m( @@ -741,7 +741,7 @@ vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m( @@ -750,7 +750,7 @@ vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vdiv_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m( @@ -759,7 +759,7 @@ vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m( @@ -768,7 +768,7 @@ vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vdiv_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vdiv_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m( @@ -777,7 +777,7 @@ vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m( @@ -786,7 +786,7 @@ vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vdiv_vv_i64m8_m(mask, op1, op2, vl); + 
return __riscv_vdiv_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m( @@ -795,6 +795,6 @@ vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vdiv_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdivu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdivu.c index b92d6f1bbeec..cdb844c68154 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdivu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vdivu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vdivu_vv_u8mf8(op1, op2, vl); + return __riscv_vdivu_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf8(op1, op2, vl); + return __riscv_vdivu_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vdivu_vv_u8mf4(op1, op2, vl); + return __riscv_vdivu_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4( @@ -39,7 +39,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - 
return vdivu_vx_u8mf4(op1, op2, vl); + return __riscv_vdivu_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2( @@ -48,7 +48,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vdivu_vv_u8mf2(op1, op2, vl); + return __riscv_vdivu_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf2(op1, op2, vl); + return __riscv_vdivu_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vdivu_vv_u8m1(op1, op2, vl); + return __riscv_vdivu_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m1(op1, op2, vl); + return __riscv_vdivu_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2( @@ -84,7 +84,7 @@ vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vdivu_vv_u8m2(op1, op2, vl); + return __riscv_vdivu_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - 
return vdivu_vx_u8m2(op1, op2, vl); + return __riscv_vdivu_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vdivu_vv_u8m4(op1, op2, vl); + return __riscv_vdivu_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4( @@ -111,7 +111,7 @@ vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m4(op1, op2, vl); + return __riscv_vdivu_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8( @@ -120,7 +120,7 @@ vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vdivu_vv_u8m8(op1, op2, vl); + return __riscv_vdivu_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8( @@ -129,7 +129,7 @@ vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m8(op1, op2, vl); + return __riscv_vdivu_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4( @@ -138,7 +138,7 @@ vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vdivu_vv_u16mf4(op1, op2, vl); + return __riscv_vdivu_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4( @@ -147,7 +147,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, 
size_t vl) { - return vdivu_vx_u16mf4(op1, op2, vl); + return __riscv_vdivu_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2( @@ -156,7 +156,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vdivu_vv_u16mf2(op1, op2, vl); + return __riscv_vdivu_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2( @@ -165,7 +165,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf2(op1, op2, vl); + return __riscv_vdivu_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1( @@ -174,7 +174,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vdivu_vv_u16m1(op1, op2, vl); + return __riscv_vdivu_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1( @@ -183,7 +183,7 @@ vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m1(op1, op2, vl); + return __riscv_vdivu_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2( @@ -192,7 +192,7 @@ vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vdivu_vv_u16m2(op1, op2, vl); + return __riscv_vdivu_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2( @@ -201,7 +201,7 @@ vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m2(op1, op2, vl); + return __riscv_vdivu_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4( @@ -210,7 +210,7 @@ vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vdivu_vv_u16m4(op1, op2, vl); + return __riscv_vdivu_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4( @@ -219,7 +219,7 @@ vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m4(op1, op2, vl); + return __riscv_vdivu_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8( @@ -228,7 +228,7 @@ vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vdivu_vv_u16m8(op1, op2, vl); + return __riscv_vdivu_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8( @@ -237,7 +237,7 @@ vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m8(op1, op2, vl); + return __riscv_vdivu_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2( @@ -246,7 +246,7 @@ vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vdivu_vv_u32mf2(op1, op2, vl); + return __riscv_vdivu_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t 
test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32mf2(op1, op2, vl); + return __riscv_vdivu_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vdivu_vv_u32m1(op1, op2, vl); + return __riscv_vdivu_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m1(op1, op2, vl); + return __riscv_vdivu_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2( @@ -282,7 +282,7 @@ vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vdivu_vv_u32m2(op1, op2, vl); + return __riscv_vdivu_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2( @@ -291,7 +291,7 @@ vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m2(op1, op2, vl); + return __riscv_vdivu_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4( @@ -300,7 +300,7 @@ vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vdivu_vv_u32m4(op1, op2, vl); + return __riscv_vdivu_vv_u32m4(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vdivu_vx_u32m4( @@ -309,7 +309,7 @@ vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m4(op1, op2, vl); + return __riscv_vdivu_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8( @@ -318,7 +318,7 @@ vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vdivu_vv_u32m8(op1, op2, vl); + return __riscv_vdivu_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8( @@ -327,7 +327,7 @@ vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m8(op1, op2, vl); + return __riscv_vdivu_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1( @@ -336,7 +336,7 @@ vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vdivu_vv_u64m1(op1, op2, vl); + return __riscv_vdivu_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1( @@ -345,7 +345,7 @@ vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m1(op1, op2, vl); + return __riscv_vdivu_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2( @@ -354,7 +354,7 @@ vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vdivu_vv_u64m2(op1, op2, vl); 
+ return __riscv_vdivu_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2( @@ -363,7 +363,7 @@ vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m2(op1, op2, vl); + return __riscv_vdivu_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4( @@ -372,7 +372,7 @@ vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vdivu_vv_u64m4(op1, op2, vl); + return __riscv_vdivu_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4( @@ -381,7 +381,7 @@ vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m4(op1, op2, vl); + return __riscv_vdivu_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8( @@ -390,7 +390,7 @@ vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vdivu_vv_u64m8(op1, op2, vl); + return __riscv_vdivu_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m8(op1, op2, vl); + return __riscv_vdivu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t 
op1, vuint8mf8_t op2, size_t vl) { - return vdivu_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m( @@ -417,7 +417,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m( @@ -426,7 +426,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vdivu_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m( @@ -435,7 +435,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m( @@ -444,7 +444,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vdivu_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m( @@ -453,7 +453,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf2_m(mask, op1, op2, vl); + return 
__riscv_vdivu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m( @@ -462,7 +462,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vdivu_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m( @@ -471,7 +471,7 @@ vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m( @@ -480,7 +480,7 @@ vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vdivu_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m( @@ -489,7 +489,7 @@ vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m( @@ -498,7 +498,7 @@ vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vdivu_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m( @@ -507,7 +507,7 @@ vuint8m4_t 
test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m( @@ -516,7 +516,7 @@ vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vdivu_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m( @@ -525,7 +525,7 @@ vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m( @@ -534,7 +534,7 @@ vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vdivu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m( @@ -543,7 +543,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m( @@ -552,7 +552,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vdivu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m( @@ -561,7 +561,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m( @@ -570,7 +570,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vdivu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m( @@ -579,7 +579,7 @@ vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m( @@ -588,7 +588,7 @@ vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vdivu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m( @@ -597,7 +597,7 @@ vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t 
op2, size_t vl) { - return vdivu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m( @@ -606,7 +606,7 @@ vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vdivu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m( @@ -615,7 +615,7 @@ vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m( @@ -624,7 +624,7 @@ vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vdivu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m( @@ -633,7 +633,7 @@ vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m( @@ -642,7 +642,7 @@ vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vdivu_vv_u32mf2_m(mask, op1, op2, vl); + return 
__riscv_vdivu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m( @@ -651,7 +651,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m( @@ -660,7 +660,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vdivu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m( @@ -669,7 +669,7 @@ vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m( @@ -678,7 +678,7 @@ vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vdivu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m( @@ -687,7 +687,7 @@ vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vdivu_vv_u32m4_m( @@ -696,7 +696,7 @@ vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vdivu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m( @@ -705,7 +705,7 @@ vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m( @@ -714,7 +714,7 @@ vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vdivu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m( @@ -723,7 +723,7 @@ vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m( @@ -732,7 +732,7 @@ vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vdivu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m( @@ -741,7 +741,7 @@ vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, 
vuint64m1_t op1, vuint64m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m( @@ -750,7 +750,7 @@ vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vdivu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m( @@ -759,7 +759,7 @@ vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m( @@ -768,7 +768,7 @@ vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vdivu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m( @@ -777,7 +777,7 @@ vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m( @@ -786,7 +786,7 @@ vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vdivu_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vdivu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vdivu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfabs.c index df0e82ecdefa..4248ed5984c3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfabs.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfabs_v_f16mf4(vfloat16mf4_t op1, size_t vl) { - return vfabs_v_f16mf4(op1, vl); + return __riscv_vfabs_v_f16mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4(vfloat16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfabs_v_f16mf2(vfloat16mf2_t op1, size_t vl) { - return vfabs_v_f16mf2(op1, vl); + return __riscv_vfabs_v_f16mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2(vfloat16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfabs_v_f16m1(vfloat16m1_t op1, size_t vl) { - return vfabs_v_f16m1(op1, vl); + return __riscv_vfabs_v_f16m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfabs_v_f16m1(vfloat16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfabs_v_f16m2(vfloat16m2_t 
op1, size_t vl) { - return vfabs_v_f16m2(op1, vl); + return __riscv_vfabs_v_f16m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfabs_v_f16m2(vfloat16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfabs_v_f16m4(vfloat16m4_t op1, size_t vl) { - return vfabs_v_f16m4(op1, vl); + return __riscv_vfabs_v_f16m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfabs_v_f16m4(vfloat16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfabs_v_f16m8(vfloat16m8_t op1, size_t vl) { - return vfabs_v_f16m8(op1, vl); + return __riscv_vfabs_v_f16m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfabs_v_f16m8(vfloat16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfabs_v_f32mf2(vfloat32mf2_t op1, size_t vl) { - return vfabs_v_f32mf2(op1, vl); + return __riscv_vfabs_v_f32mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2(vfloat32mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfabs_v_f32m1(vfloat32m1_t op1, size_t vl) { - return vfabs_v_f32m1(op1, vl); + return __riscv_vfabs_v_f32m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfabs_v_f32m1(vfloat32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfabs_v_f32m2(vfloat32m2_t op1, size_t vl) { - return vfabs_v_f32m2(op1, vl); + return __riscv_vfabs_v_f32m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfabs_v_f32m2(vfloat32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfabs_v_f32m4(vfloat32m4_t op1, size_t vl) { - return vfabs_v_f32m4(op1, vl); + return __riscv_vfabs_v_f32m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfabs_v_f32m4(vfloat32m4_t 
op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfabs_v_f32m8(vfloat32m8_t op1, size_t vl) { - return vfabs_v_f32m8(op1, vl); + return __riscv_vfabs_v_f32m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfabs_v_f32m8(vfloat32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfabs_v_f64m1(vfloat64m1_t op1, size_t vl) { - return vfabs_v_f64m1(op1, vl); + return __riscv_vfabs_v_f64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfabs_v_f64m1(vfloat64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfabs_v_f64m2(vfloat64m2_t op1, size_t vl) { - return vfabs_v_f64m2(op1, vl); + return __riscv_vfabs_v_f64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfabs_v_f64m2(vfloat64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfabs_v_f64m4(vfloat64m4_t op1, size_t vl) { - return vfabs_v_f64m4(op1, vl); + return __riscv_vfabs_v_f64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfabs_v_f64m4(vfloat64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfabs_v_f64m8(vfloat64m8_t op1, size_t vl) { - return vfabs_v_f64m8(op1, vl); + return __riscv_vfabs_v_f64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_m( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfabs_v_f64m8(vfloat64m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return vfabs_v_f16mf4_m(mask, op1, vl); + return __riscv_vfabs_v_f16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_m( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) 
{ - return vfabs_v_f16mf2_m(mask, op1, vl); + return __riscv_vfabs_v_f16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m1_m( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return vfabs_v_f16m1_m(mask, op1, vl); + return __riscv_vfabs_v_f16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m2_m( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return vfabs_v_f16m2_m(mask, op1, vl); + return __riscv_vfabs_v_f16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m4_m( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return vfabs_v_f16m4_m(mask, op1, vl); + return __riscv_vfabs_v_f16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m8_m( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return vfabs_v_f16m8_m(mask, op1, vl); + return __riscv_vfabs_v_f16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_m( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return vfabs_v_f32mf2_m(mask, op1, vl); + return __riscv_vfabs_v_f32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m1_m( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t 
op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return vfabs_v_f32m1_m(mask, op1, vl); + return __riscv_vfabs_v_f32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return vfabs_v_f32m2_m(mask, op1, vl); + return __riscv_vfabs_v_f32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return vfabs_v_f32m4_m(mask, op1, vl); + return __riscv_vfabs_v_f32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m8_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return vfabs_v_f32m8_m(mask, op1, vl); + return __riscv_vfabs_v_f32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m1_m( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return vfabs_v_f64m1_m(mask, op1, vl); + return __riscv_vfabs_v_f64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m2_m( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return vfabs_v_f64m2_m(mask, op1, vl); + return __riscv_vfabs_v_f64m2_m(mask, op1, vl); } // 
CHECK-RV64-LABEL: @test_vfabs_v_f64m4_m( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return vfabs_v_f64m4_m(mask, op1, vl); + return __riscv_vfabs_v_f64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m8_m( @@ -274,6 +274,6 @@ vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return vfabs_v_f64m8_m(mask, op1, vl); + return __riscv_vfabs_v_f64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c index 0dc58982a887..f3cfacc623da 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfadd_vv_f16mf4(op1, op2, vl); + return __riscv_vfadd_vv_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf4(op1, op2, vl); + return __riscv_vfadd_vf_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfadd_vv_f16mf2(op1, 
op2, vl); + return __riscv_vfadd_vv_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf2(op1, op2, vl); + return __riscv_vfadd_vf_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfadd_vv_f16m1(op1, op2, vl); + return __riscv_vfadd_vv_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m1(op1, op2, vl); + return __riscv_vfadd_vf_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfadd_vv_f16m2(op1, op2, vl); + return __riscv_vfadd_vv_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m2(op1, op2, vl); + return __riscv_vfadd_vf_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t 
test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfadd_vv_f16m4(op1, op2, vl); + return __riscv_vfadd_vv_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m4(op1, op2, vl); + return __riscv_vfadd_vf_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfadd_vv_f16m8(op1, op2, vl); + return __riscv_vfadd_vv_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m8(op1, op2, vl); + return __riscv_vfadd_vf_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfadd_vv_f32mf2(op1, op2, vl); + return __riscv_vfadd_vv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { - return vfadd_vf_f32mf2(op1, op2, vl); + return __riscv_vfadd_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t 
test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfadd_vv_f32m1(op1, op2, vl); + return __riscv_vfadd_vv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { - return vfadd_vf_f32m1(op1, op2, vl); + return __riscv_vfadd_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfadd_vv_f32m2(op1, op2, vl); + return __riscv_vfadd_vv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { - return vfadd_vf_f32m2(op1, op2, vl); + return __riscv_vfadd_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfadd_vv_f32m4(op1, op2, vl); + return __riscv_vfadd_vv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { - return vfadd_vf_f32m4(op1, op2, vl); + return __riscv_vfadd_vf_f32m4(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfadd_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfadd_vv_f32m8(op1, op2, vl); + return __riscv_vfadd_vv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { - return vfadd_vf_f32m8(op1, op2, vl); + return __riscv_vfadd_vf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfadd_vv_f64m1(op1, op2, vl); + return __riscv_vfadd_vv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { - return vfadd_vf_f64m1(op1, op2, vl); + return __riscv_vfadd_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfadd_vv_f64m2(op1, op2, vl); + return __riscv_vfadd_vv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { - return vfadd_vf_f64m2(op1, 
op2, vl); + return __riscv_vfadd_vf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfadd_vv_f64m4(op1, op2, vl); + return __riscv_vfadd_vv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { - return vfadd_vf_f64m4(op1, op2, vl); + return __riscv_vfadd_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfadd_vv_f64m8(op1, op2, vl); + return __riscv_vfadd_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { - return vfadd_vf_f64m8(op1, op2, vl); + return __riscv_vfadd_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfadd_vv_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfadd_vv_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfadd_vv_f16m1_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m1_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, 
vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfadd_vv_f16m2_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m2_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfadd_vv_f16m4_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m4_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfadd_vv_f16m8_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m8_m(mask, op1, 
op2, vl); + return __riscv_vfadd_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfadd_vv_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfadd_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfadd_vv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfadd_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfadd_vv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f32m2_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfadd_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfadd_vv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfadd_vf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfadd_vv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfadd_vf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t 
test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfadd_vv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vfadd_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfadd_vv_f64m2_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vfadd_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfadd_vv_f64m4_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfadd_vf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfadd_vv_f64m8_m(mask, op1, op2, vl); + return __riscv_vfadd_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfadd_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfadd_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass.c index fb837294d87d..a80968808faf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfclass.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfclass_v_u16mf4(vfloat16mf4_t op1, size_t vl) { - return vfclass_v_u16mf4(op1, vl); + return __riscv_vfclass_v_u16mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16mf2( @@ -22,7 +22,7 @@ vuint16mf4_t test_vfclass_v_u16mf4(vfloat16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfclass_v_u16mf2(vfloat16mf2_t op1, size_t vl) { - return vfclass_v_u16mf2(op1, vl); + return __riscv_vfclass_v_u16mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m1( @@ -31,7 +31,7 
@@ vuint16mf2_t test_vfclass_v_u16mf2(vfloat16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfclass_v_u16m1(vfloat16m1_t op1, size_t vl) { - return vfclass_v_u16m1(op1, vl); + return __riscv_vfclass_v_u16m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m2( @@ -40,7 +40,7 @@ vuint16m1_t test_vfclass_v_u16m1(vfloat16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfclass_v_u16m2(vfloat16m2_t op1, size_t vl) { - return vfclass_v_u16m2(op1, vl); + return __riscv_vfclass_v_u16m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m4( @@ -49,7 +49,7 @@ vuint16m2_t test_vfclass_v_u16m2(vfloat16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfclass_v_u16m4(vfloat16m4_t op1, size_t vl) { - return vfclass_v_u16m4(op1, vl); + return __riscv_vfclass_v_u16m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m8( @@ -58,7 +58,7 @@ vuint16m4_t test_vfclass_v_u16m4(vfloat16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfclass_v_u16m8(vfloat16m8_t op1, size_t vl) { - return vfclass_v_u16m8(op1, vl); + return __riscv_vfclass_v_u16m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2( @@ -67,7 +67,7 @@ vuint16m8_t test_vfclass_v_u16m8(vfloat16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) { - return vfclass_v_u32mf2(op1, vl); + return __riscv_vfclass_v_u32mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m1( @@ -76,7 +76,7 @@ vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) { - return vfclass_v_u32m1(op1, vl); + return __riscv_vfclass_v_u32m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m2( @@ -85,7 +85,7 @@ vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t 
op1, size_t vl) { - return vfclass_v_u32m2(op1, vl); + return __riscv_vfclass_v_u32m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m4( @@ -94,7 +94,7 @@ vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) { - return vfclass_v_u32m4(op1, vl); + return __riscv_vfclass_v_u32m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m8( @@ -103,7 +103,7 @@ vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) { - return vfclass_v_u32m8(op1, vl); + return __riscv_vfclass_v_u32m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m1( @@ -112,7 +112,7 @@ vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) { - return vfclass_v_u64m1(op1, vl); + return __riscv_vfclass_v_u64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m2( @@ -121,7 +121,7 @@ vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) { - return vfclass_v_u64m2(op1, vl); + return __riscv_vfclass_v_u64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m4( @@ -130,7 +130,7 @@ vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) { - return vfclass_v_u64m4(op1, vl); + return __riscv_vfclass_v_u64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m8( @@ -139,7 +139,7 @@ vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) { - return vfclass_v_u64m8(op1, vl); + return __riscv_vfclass_v_u64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_m( @@ 
-148,7 +148,7 @@ vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return vfclass_v_u16mf4_m(mask, op1, vl); + return __riscv_vfclass_v_u16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_m( @@ -157,7 +157,7 @@ vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return vfclass_v_u16mf2_m(mask, op1, vl); + return __riscv_vfclass_v_u16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m1_m( @@ -166,7 +166,7 @@ vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return vfclass_v_u16m1_m(mask, op1, vl); + return __riscv_vfclass_v_u16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m2_m( @@ -175,7 +175,7 @@ vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return vfclass_v_u16m2_m(mask, op1, vl); + return __riscv_vfclass_v_u16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m4_m( @@ -184,7 +184,7 @@ vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return vfclass_v_u16m4_m(mask, op1, vl); + return __riscv_vfclass_v_u16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m8_m( @@ -193,7 +193,7 @@ vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vfloat16m8_t op1, 
size_t vl) { - return vfclass_v_u16m8_m(mask, op1, vl); + return __riscv_vfclass_v_u16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m( @@ -202,7 +202,7 @@ vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return vfclass_v_u32mf2_m(mask, op1, vl); + return __riscv_vfclass_v_u32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m( @@ -211,7 +211,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return vfclass_v_u32m1_m(mask, op1, vl); + return __riscv_vfclass_v_u32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m( @@ -220,7 +220,7 @@ vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return vfclass_v_u32m2_m(mask, op1, vl); + return __riscv_vfclass_v_u32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m( @@ -229,7 +229,7 @@ vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return vfclass_v_u32m4_m(mask, op1, vl); + return __riscv_vfclass_v_u32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m( @@ -238,7 +238,7 @@ vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return vfclass_v_u32m8_m(mask, op1, vl); + return __riscv_vfclass_v_u32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m( @@ -247,7 +247,7 @@ vuint32m8_t 
test_vfclass_v_u32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return vfclass_v_u64m1_m(mask, op1, vl); + return __riscv_vfclass_v_u64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m( @@ -256,7 +256,7 @@ vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return vfclass_v_u64m2_m(mask, op1, vl); + return __riscv_vfclass_v_u64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m( @@ -265,7 +265,7 @@ vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return vfclass_v_u64m4_m(mask, op1, vl); + return __riscv_vfclass_v_u64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m( @@ -274,6 +274,6 @@ vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return vfclass_v_u64m8_m(mask, op1, vl); + return __riscv_vfclass_v_u64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c index 9f4e155e0468..207765b06818 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) { - return vfcvt_x_f_v_i16mf4(src, vl); + return __riscv_vfcvt_x_f_v_i16mf4(src, vl); } // 
CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4( @@ -22,7 +22,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf4(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2( @@ -31,7 +31,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfcvt_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) { - return vfcvt_x_f_v_i16mf2(src, vl); + return __riscv_vfcvt_x_f_v_i16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2( @@ -40,7 +40,7 @@ vint16mf2_t test_vfcvt_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf2(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1( @@ -49,7 +49,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfcvt_x_f_v_i16m1(vfloat16m1_t src, size_t vl) { - return vfcvt_x_f_v_i16m1(src, vl); + return __riscv_vfcvt_x_f_v_i16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1( @@ -58,7 +58,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m1(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2( @@ -67,7 +67,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_x_f_v_i16m2(vfloat16m2_t src, size_t vl) { - return vfcvt_x_f_v_i16m2(src, vl); + return __riscv_vfcvt_x_f_v_i16m2(src, vl); } // CHECK-RV64-LABEL: 
@test_vfcvt_rtz_x_f_v_i16m2( @@ -76,7 +76,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2(vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m2(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4( @@ -85,7 +85,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfcvt_x_f_v_i16m4(vfloat16m4_t src, size_t vl) { - return vfcvt_x_f_v_i16m4(src, vl); + return __riscv_vfcvt_x_f_v_i16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4( @@ -94,7 +94,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m4(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8( @@ -103,7 +103,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_x_f_v_i16m8(vfloat16m8_t src, size_t vl) { - return vfcvt_x_f_v_i16m8(src, vl); + return __riscv_vfcvt_x_f_v_i16m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8( @@ -112,7 +112,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8(vfloat16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m8(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4( @@ -121,7 +121,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfcvt_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf4(src, vl); + return __riscv_vfcvt_xu_f_v_u16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4( @@ 
-130,7 +130,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf4(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2( @@ -139,7 +139,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf2(src, vl); + return __riscv_vfcvt_xu_f_v_u16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2( @@ -148,7 +148,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf2(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1( @@ -157,7 +157,7 @@ vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) { - return vfcvt_xu_f_v_u16m1(src, vl); + return __riscv_vfcvt_xu_f_v_u16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1( @@ -166,7 +166,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m1(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2( @@ -175,7 +175,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) { - return vfcvt_xu_f_v_u16m2(src, vl); + return __riscv_vfcvt_xu_f_v_u16m2(src, vl); } // CHECK-RV64-LABEL: 
@test_vfcvt_rtz_xu_f_v_u16m2( @@ -184,7 +184,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m2(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4( @@ -193,7 +193,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) { - return vfcvt_xu_f_v_u16m4(src, vl); + return __riscv_vfcvt_xu_f_v_u16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4( @@ -202,7 +202,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m4(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8( @@ -211,7 +211,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) { - return vfcvt_xu_f_v_u16m8(src, vl); + return __riscv_vfcvt_xu_f_v_u16m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8( @@ -220,7 +220,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m8(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4( @@ -229,7 +229,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_x_v_f16mf4(vint16mf4_t src, size_t vl) { - return vfcvt_f_x_v_f16mf4(src, vl); + return __riscv_vfcvt_f_x_v_f16mf4(src, vl); } // CHECK-RV64-LABEL: 
@test_vfcvt_f_x_v_f16mf2( @@ -238,7 +238,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4(vint16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfcvt_f_x_v_f16mf2(vint16mf2_t src, size_t vl) { - return vfcvt_f_x_v_f16mf2(src, vl); + return __riscv_vfcvt_f_x_v_f16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1( @@ -247,7 +247,7 @@ vfloat16mf2_t test_vfcvt_f_x_v_f16mf2(vint16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_x_v_f16m1(vint16m1_t src, size_t vl) { - return vfcvt_f_x_v_f16m1(src, vl); + return __riscv_vfcvt_f_x_v_f16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2( @@ -256,7 +256,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1(vint16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfcvt_f_x_v_f16m2(vint16m2_t src, size_t vl) { - return vfcvt_f_x_v_f16m2(src, vl); + return __riscv_vfcvt_f_x_v_f16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4( @@ -265,7 +265,7 @@ vfloat16m2_t test_vfcvt_f_x_v_f16m2(vint16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_x_v_f16m4(vint16m4_t src, size_t vl) { - return vfcvt_f_x_v_f16m4(src, vl); + return __riscv_vfcvt_f_x_v_f16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8( @@ -274,7 +274,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4(vint16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfcvt_f_x_v_f16m8(vint16m8_t src, size_t vl) { - return vfcvt_f_x_v_f16m8(src, vl); + return __riscv_vfcvt_f_x_v_f16m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4( @@ -283,7 +283,7 @@ vfloat16m8_t test_vfcvt_f_x_v_f16m8(vint16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4(vuint16mf4_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf4(src, vl); + return __riscv_vfcvt_f_xu_v_f16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2( @@ -292,7 +292,7 @@ vfloat16mf4_t 
test_vfcvt_f_xu_v_f16mf4(vuint16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2(vuint16mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf2(src, vl); + return __riscv_vfcvt_f_xu_v_f16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1( @@ -301,7 +301,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2(vuint16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_xu_v_f16m1(vuint16m1_t src, size_t vl) { - return vfcvt_f_xu_v_f16m1(src, vl); + return __riscv_vfcvt_f_xu_v_f16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2( @@ -310,7 +310,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1(vuint16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfcvt_f_xu_v_f16m2(vuint16m2_t src, size_t vl) { - return vfcvt_f_xu_v_f16m2(src, vl); + return __riscv_vfcvt_f_xu_v_f16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4( @@ -319,7 +319,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2(vuint16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_xu_v_f16m4(vuint16m4_t src, size_t vl) { - return vfcvt_f_xu_v_f16m4(src, vl); + return __riscv_vfcvt_f_xu_v_f16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8( @@ -328,7 +328,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4(vuint16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfcvt_f_xu_v_f16m8(vuint16m8_t src, size_t vl) { - return vfcvt_f_xu_v_f16m8(src, vl); + return __riscv_vfcvt_f_xu_v_f16m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2( @@ -337,7 +337,7 @@ vfloat16m8_t test_vfcvt_f_xu_v_f16m8(vuint16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { - return vfcvt_x_f_v_i32mf2(src, vl); + return __riscv_vfcvt_x_f_v_i32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2( @@ -346,7 +346,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32mf2(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1( @@ -355,7 +355,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { - return vfcvt_x_f_v_i32m1(src, vl); + return __riscv_vfcvt_x_f_v_i32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1( @@ -364,7 +364,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m1(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2( @@ -373,7 +373,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { - return vfcvt_x_f_v_i32m2(src, vl); + return __riscv_vfcvt_x_f_v_i32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2( @@ -382,7 +382,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m2(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4( @@ -391,7 +391,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { - return vfcvt_x_f_v_i32m4(src, vl); + return __riscv_vfcvt_x_f_v_i32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4( @@ -400,7 +400,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m4(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8( @@ -409,7 +409,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { - return vfcvt_x_f_v_i32m8(src, vl); + return __riscv_vfcvt_x_f_v_i32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8( @@ -418,7 +418,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m8(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2( @@ -427,7 +427,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u32mf2(src, vl); + return __riscv_vfcvt_xu_f_v_u32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2( @@ -436,7 +436,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32mf2(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1( @@ -445,7 +445,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { - return vfcvt_xu_f_v_u32m1(src, vl); + return __riscv_vfcvt_xu_f_v_u32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1( @@ -454,7 +454,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m1(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2( @@ -463,7 +463,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { - return vfcvt_xu_f_v_u32m2(src, vl); + return __riscv_vfcvt_xu_f_v_u32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2( @@ -472,7 +472,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m2(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4( @@ -481,7 +481,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { - return vfcvt_xu_f_v_u32m4(src, vl); + return __riscv_vfcvt_xu_f_v_u32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4( @@ -490,7 +490,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m4(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8( @@ -499,7 +499,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { - return vfcvt_xu_f_v_u32m8(src, vl); + return __riscv_vfcvt_xu_f_v_u32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8( @@ -508,7 +508,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m8(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2( @@ -517,7 +517,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) { - return vfcvt_f_x_v_f32mf2(src, vl); + return __riscv_vfcvt_f_x_v_f32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1( @@ -526,7 +526,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) { - return vfcvt_f_x_v_f32m1(src, vl); + return __riscv_vfcvt_f_x_v_f32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2( @@ -535,7 +535,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) { - return vfcvt_f_x_v_f32m2(src, vl); + return __riscv_vfcvt_f_x_v_f32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4( @@ -544,7 +544,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) { - return vfcvt_f_x_v_f32m4(src, vl); + return __riscv_vfcvt_f_x_v_f32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8( @@ -553,7 +553,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) { - return vfcvt_f_x_v_f32m8(src, vl); + return __riscv_vfcvt_f_x_v_f32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2( @@ -562,7 +562,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, 
size_t vl) { - return vfcvt_f_xu_v_f32mf2(src, vl); + return __riscv_vfcvt_f_xu_v_f32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1( @@ -571,7 +571,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) { - return vfcvt_f_xu_v_f32m1(src, vl); + return __riscv_vfcvt_f_xu_v_f32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2( @@ -580,7 +580,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) { - return vfcvt_f_xu_v_f32m2(src, vl); + return __riscv_vfcvt_f_xu_v_f32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4( @@ -589,7 +589,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) { - return vfcvt_f_xu_v_f32m4(src, vl); + return __riscv_vfcvt_f_xu_v_f32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8( @@ -598,7 +598,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) { - return vfcvt_f_xu_v_f32m8(src, vl); + return __riscv_vfcvt_f_xu_v_f32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1( @@ -607,7 +607,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { - return vfcvt_x_f_v_i64m1(src, vl); + return __riscv_vfcvt_x_f_v_i64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1( @@ -616,7 +616,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m1(src, vl); + 
return __riscv_vfcvt_rtz_x_f_v_i64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2( @@ -625,7 +625,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { - return vfcvt_x_f_v_i64m2(src, vl); + return __riscv_vfcvt_x_f_v_i64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2( @@ -634,7 +634,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m2(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4( @@ -643,7 +643,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { - return vfcvt_x_f_v_i64m4(src, vl); + return __riscv_vfcvt_x_f_v_i64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4( @@ -652,7 +652,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m4(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8( @@ -661,7 +661,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { - return vfcvt_x_f_v_i64m8(src, vl); + return __riscv_vfcvt_x_f_v_i64m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8( @@ -670,7 +670,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m8(src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m8(src, 
vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1( @@ -679,7 +679,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { - return vfcvt_xu_f_v_u64m1(src, vl); + return __riscv_vfcvt_xu_f_v_u64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1( @@ -688,7 +688,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m1(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2( @@ -697,7 +697,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { - return vfcvt_xu_f_v_u64m2(src, vl); + return __riscv_vfcvt_xu_f_v_u64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2( @@ -706,7 +706,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m2(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4( @@ -715,7 +715,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { - return vfcvt_xu_f_v_u64m4(src, vl); + return __riscv_vfcvt_xu_f_v_u64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4( @@ -724,7 +724,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m4(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m4(src, 
vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8( @@ -733,7 +733,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { - return vfcvt_xu_f_v_u64m8(src, vl); + return __riscv_vfcvt_xu_f_v_u64m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8( @@ -742,7 +742,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m8(src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1( @@ -751,7 +751,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) { - return vfcvt_f_x_v_f64m1(src, vl); + return __riscv_vfcvt_f_x_v_f64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) { - return vfcvt_f_x_v_f64m2(src, vl); + return __riscv_vfcvt_f_x_v_f64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4( @@ -769,7 +769,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) { - return vfcvt_f_x_v_f64m4(src, vl); + return __riscv_vfcvt_f_x_v_f64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8( @@ -778,7 +778,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) { - return vfcvt_f_x_v_f64m8(src, vl); + return __riscv_vfcvt_f_x_v_f64m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1( @@ -787,7 
+787,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) { - return vfcvt_f_xu_v_f64m1(src, vl); + return __riscv_vfcvt_f_xu_v_f64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2( @@ -796,7 +796,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) { - return vfcvt_f_xu_v_f64m2(src, vl); + return __riscv_vfcvt_f_xu_v_f64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4( @@ -805,7 +805,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) { - return vfcvt_f_xu_v_f64m4(src, vl); + return __riscv_vfcvt_f_xu_v_f64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8( @@ -814,7 +814,7 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) { - return vfcvt_f_xu_v_f64m8(src, vl); + return __riscv_vfcvt_f_xu_v_f64m8(src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_m( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return vfcvt_x_f_v_i16mf4_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m( @@ -832,7 +832,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf4_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16mf4_m(mask, src, vl); } // 
CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_m( @@ -841,7 +841,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vfcvt_x_f_v_i16mf2_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m( @@ -850,7 +850,7 @@ vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf2_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_m( @@ -859,7 +859,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vfcvt_x_f_v_i16m1_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m( @@ -868,7 +868,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m1_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_m( @@ -877,7 +877,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vfcvt_x_f_v_i16m2_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_m( @@ -886,7 +886,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t 
mask, vfloat16m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m2_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_m( @@ -895,7 +895,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vfcvt_x_f_v_i16m4_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m( @@ -904,7 +904,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m4_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_m( @@ -913,7 +913,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return vfcvt_x_f_v_i16m8_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m( @@ -922,7 +922,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m8_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_m( @@ -931,7 +931,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t 
mask, vfloat16mf4_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf4_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m( @@ -940,7 +940,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf4_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_m( @@ -949,7 +949,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf2_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m( @@ -958,7 +958,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf2_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_m( @@ -967,7 +967,7 @@ vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vfcvt_xu_f_v_u16m1_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m( @@ -976,7 +976,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return 
vfcvt_rtz_xu_f_v_u16m1_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_m( @@ -985,7 +985,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vfcvt_xu_f_v_u16m2_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m( @@ -994,7 +994,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m2_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_m( @@ -1003,7 +1003,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vfcvt_xu_f_v_u16m4_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m( @@ -1012,7 +1012,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m4_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_m( @@ -1021,7 +1021,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return vfcvt_xu_f_v_u16m8_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u16m8_m(mask, src, vl); } 
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m( @@ -1030,7 +1030,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m8_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_m( @@ -1039,7 +1039,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vint16mf4_t src, size_t vl) { - return vfcvt_f_x_v_f16mf4_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_m( @@ -1048,7 +1048,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vint16mf4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vint16mf2_t src, size_t vl) { - return vfcvt_f_x_v_f16mf2_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_m( @@ -1057,7 +1057,7 @@ vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vint16mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vint16m1_t src, size_t vl) { - return vfcvt_f_x_v_f16m1_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_m( @@ -1066,7 +1066,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vint16m1_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vint16m2_t src, size_t vl) { - return vfcvt_f_x_v_f16m2_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_m( @@ -1075,7 +1075,7 @@ vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t 
mask, vint16m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vint16m4_t src, size_t vl) { - return vfcvt_f_x_v_f16m4_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_m( @@ -1084,7 +1084,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vint16m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vint16m8_t src, size_t vl) { - return vfcvt_f_x_v_f16m8_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_m( @@ -1093,7 +1093,7 @@ vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vint16m8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf4_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_m( @@ -1102,7 +1102,7 @@ vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint16mf4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf2_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_m( @@ -1111,7 +1111,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint16mf2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint16m1_t src, size_t vl) { - return vfcvt_f_xu_v_f16m1_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_m( @@ -1120,7 +1120,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint16m1_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, 
vuint16m2_t src, size_t vl) { - return vfcvt_f_xu_v_f16m2_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_m( @@ -1129,7 +1129,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint16m2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint16m4_t src, size_t vl) { - return vfcvt_f_xu_v_f16m4_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_m( @@ -1138,7 +1138,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint16m4_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint16m8_t src, size_t vl) { - return vfcvt_f_xu_v_f16m8_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_m( @@ -1147,7 +1147,7 @@ vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint16m8_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfcvt_x_f_v_i32mf2_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m( @@ -1156,7 +1156,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32mf2_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m( @@ -1165,7 +1165,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vfcvt_x_f_v_i32m1_m(mask, src, vl); + return 
__riscv_vfcvt_x_f_v_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m( @@ -1174,7 +1174,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m1_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m( @@ -1183,7 +1183,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfcvt_x_f_v_i32m2_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m( @@ -1192,7 +1192,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m2_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m( @@ -1201,7 +1201,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfcvt_x_f_v_i32m4_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m( @@ -1210,7 +1210,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m4_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m( @@ -1219,7 
+1219,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return vfcvt_x_f_v_i32m8_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m( @@ -1228,7 +1228,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m8_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m( @@ -1237,7 +1237,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u32mf2_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m( @@ -1246,7 +1246,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32mf2_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m( @@ -1255,7 +1255,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vfcvt_xu_f_v_u32m1_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m( @@ -1264,7 +1264,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t 
v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m1_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m( @@ -1273,7 +1273,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfcvt_xu_f_v_u32m2_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m( @@ -1282,7 +1282,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m2_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m( @@ -1291,7 +1291,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfcvt_xu_f_v_u32m4_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m( @@ -1300,7 +1300,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m4_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m( @@ -1309,7 +1309,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t 
test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return vfcvt_xu_f_v_u32m8_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m( @@ -1318,7 +1318,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m8_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m( @@ -1327,7 +1327,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vint32mf2_t src, size_t vl) { - return vfcvt_f_x_v_f32mf2_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m( @@ -1336,7 +1336,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vint32mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return vfcvt_f_x_v_f32m1_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m( @@ -1345,7 +1345,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vint32m1_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return vfcvt_f_x_v_f32m2_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m( @@ -1354,7 +1354,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vint32m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return vfcvt_f_x_v_f32m4_m(mask, src, vl); + 
return __riscv_vfcvt_f_x_v_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m( @@ -1363,7 +1363,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vint32m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vint32m8_t src, size_t vl) { - return vfcvt_f_x_v_f32m8_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m( @@ -1372,7 +1372,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vint32m8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f32mf2_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m( @@ -1381,7 +1381,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint32mf2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return vfcvt_f_xu_v_f32m1_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m( @@ -1390,7 +1390,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint32m1_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return vfcvt_f_xu_v_f32m2_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m( @@ -1399,7 +1399,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint32m2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return vfcvt_f_xu_v_f32m4_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m( @@ -1408,7 +1408,7 @@ 
vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint32m4_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint32m8_t src, size_t vl) { - return vfcvt_f_xu_v_f32m8_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m( @@ -1417,7 +1417,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint32m8_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return vfcvt_x_f_v_i64m1_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m( @@ -1426,7 +1426,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m1_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m( @@ -1435,7 +1435,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return vfcvt_x_f_v_i64m2_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m( @@ -1444,7 +1444,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m2_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m( @@ -1453,7 +1453,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return vfcvt_x_f_v_i64m4_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m( @@ -1462,7 +1462,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m4_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m( @@ -1471,7 +1471,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return vfcvt_x_f_v_i64m8_m(mask, src, vl); + return __riscv_vfcvt_x_f_v_i64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m( @@ -1480,7 +1480,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m8_m(mask, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m( @@ -1489,7 +1489,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return vfcvt_xu_f_v_u64m1_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m( @@ -1498,7 +1498,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return 
vfcvt_rtz_xu_f_v_u64m1_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m( @@ -1507,7 +1507,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return vfcvt_xu_f_v_u64m2_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m( @@ -1516,7 +1516,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m2_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m( @@ -1525,7 +1525,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return vfcvt_xu_f_v_u64m4_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m( @@ -1534,7 +1534,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m4_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m( @@ -1543,7 +1543,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return vfcvt_xu_f_v_u64m8_m(mask, src, vl); + return __riscv_vfcvt_xu_f_v_u64m8_m(mask, src, 
vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m( @@ -1552,7 +1552,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m8_m(mask, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m( @@ -1561,7 +1561,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vint64m1_t src, size_t vl) { - return vfcvt_f_x_v_f64m1_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m( @@ -1570,7 +1570,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vint64m1_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vint64m2_t src, size_t vl) { - return vfcvt_f_x_v_f64m2_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m( @@ -1579,7 +1579,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vint64m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vint64m4_t src, size_t vl) { - return vfcvt_f_x_v_f64m4_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_m( @@ -1588,7 +1588,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vint64m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vint64m8_t src, size_t vl) { - return vfcvt_f_x_v_f64m8_m(mask, src, vl); + return __riscv_vfcvt_f_x_v_f64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m( @@ -1597,7 +1597,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, 
vint64m8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint64m1_t src, size_t vl) { - return vfcvt_f_xu_v_f64m1_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m( @@ -1606,7 +1606,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint64m1_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint64m2_t src, size_t vl) { - return vfcvt_f_xu_v_f64m2_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m( @@ -1615,7 +1615,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint64m2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint64m4_t src, size_t vl) { - return vfcvt_f_xu_v_f64m4_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m( @@ -1624,6 +1624,6 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint64m4_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint64m8_t src, size_t vl) { - return vfcvt_f_xu_v_f64m8_m(mask, src, vl); + return __riscv_vfcvt_f_xu_v_f64m8_m(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfdiv.c index 02c762280f43..4dd1a0d763e9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfdiv.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfdiv_vv_f16mf4(op1, op2, vl); + return 
__riscv_vfdiv_vv_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf4(op1, op2, vl); + return __riscv_vfdiv_vf_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfdiv_vv_f16mf2(op1, op2, vl); + return __riscv_vfdiv_vv_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf2(op1, op2, vl); + return __riscv_vfdiv_vf_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfdiv_vv_f16m1(op1, op2, vl); + return __riscv_vfdiv_vv_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m1(op1, op2, vl); + return __riscv_vfdiv_vf_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t 
test_vfdiv_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfdiv_vv_f16m2(op1, op2, vl); + return __riscv_vfdiv_vv_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m2(op1, op2, vl); + return __riscv_vfdiv_vf_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfdiv_vv_f16m4(op1, op2, vl); + return __riscv_vfdiv_vv_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m4(op1, op2, vl); + return __riscv_vfdiv_vf_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfdiv_vv_f16m8(op1, op2, vl); + return __riscv_vfdiv_vv_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m8(op1, op2, vl); + return __riscv_vfdiv_vf_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfdiv_vf_f16m8(vfloat16m8_t op1, 
_Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfdiv_vv_f32mf2(op1, op2, vl); + return __riscv_vfdiv_vv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32mf2(op1, op2, vl); + return __riscv_vfdiv_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfdiv_vv_f32m1(op1, op2, vl); + return __riscv_vfdiv_vv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m1(op1, op2, vl); + return __riscv_vfdiv_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfdiv_vv_f32m2(op1, op2, vl); + return __riscv_vfdiv_vv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m2(op1, op2, vl); + return __riscv_vfdiv_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfdiv_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfdiv_vv_f32m4(op1, op2, vl); + return __riscv_vfdiv_vv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m4(op1, op2, vl); + return __riscv_vfdiv_vf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfdiv_vv_f32m8(op1, op2, vl); + return __riscv_vfdiv_vv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m8(op1, op2, vl); + return __riscv_vfdiv_vf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfdiv_vv_f64m1(op1, op2, vl); + return __riscv_vfdiv_vv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m1(op1, op2, vl); + return 
__riscv_vfdiv_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfdiv_vv_f64m2(op1, op2, vl); + return __riscv_vfdiv_vv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m2(op1, op2, vl); + return __riscv_vfdiv_vf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfdiv_vv_f64m4(op1, op2, vl); + return __riscv_vfdiv_vv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m4(op1, op2, vl); + return __riscv_vfdiv_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfdiv_vv_f64m8(op1, op2, vl); + return __riscv_vfdiv_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double 
op2, size_t vl) { - return vfdiv_vf_f64m8(op1, op2, vl); + return __riscv_vfdiv_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfdiv_vv_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfdiv_vv_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfdiv_vv_f16m1_m(mask, op1, op2, vl); + 
return __riscv_vfdiv_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m1_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfdiv_vv_f16m2_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m2_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfdiv_vv_f16m4_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m4_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfdiv_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfdiv_vv_f16m8_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m8_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfdiv_vv_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfdiv_vv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t 
test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfdiv_vv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfdiv_vv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfdiv_vv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfdiv_vv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfdiv_vv_f64m2_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, 
double op2, size_t vl) { - return vfdiv_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfdiv_vv_f64m4_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfdiv_vv_f64m8_m(mask, op1, op2, vl); + return __riscv_vfdiv_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfdiv_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfirst.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfirst.c index 2b69a4a2b0de..5dbb93c8092e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfirst.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfirst.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b1(vbool1_t op1, size_t vl) { - return vfirst_m_b1(op1, vl); + return __riscv_vfirst_m_b1(op1, vl); } // CHECK-RV64-LABEL: @test_vfirst_m_b2( @@ -21,7 +21,7 @@ long test_vfirst_m_b1(vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b2(vbool2_t op1, size_t vl) { - return vfirst_m_b2(op1, vl); + return __riscv_vfirst_m_b2(op1, vl); } // CHECK-RV64-LABEL: @test_vfirst_m_b4( @@ -30,7 +30,7 @@ long test_vfirst_m_b2(vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b4(vbool4_t op1, size_t vl) { - return vfirst_m_b4(op1, vl); + return __riscv_vfirst_m_b4(op1, vl); } // CHECK-RV64-LABEL: @test_vfirst_m_b8( @@ -39,7 +39,7 @@ long test_vfirst_m_b4(vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b8(vbool8_t op1, size_t vl) { - return vfirst_m_b8(op1, vl); + return __riscv_vfirst_m_b8(op1, vl); } // CHECK-RV64-LABEL: @test_vfirst_m_b16( @@ -48,7 +48,7 @@ long test_vfirst_m_b8(vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b16(vbool16_t op1, size_t vl) { - return vfirst_m_b16(op1, vl); + return __riscv_vfirst_m_b16(op1, vl); } // CHECK-RV64-LABEL: @test_vfirst_m_b32( @@ -57,7 +57,7 @@ long test_vfirst_m_b16(vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b32(vbool32_t op1, size_t vl) { - return vfirst_m_b32(op1, vl); + return __riscv_vfirst_m_b32(op1, vl); } // CHECK-RV64-LABEL: @test_vfirst_m_b64( @@ -66,7 +66,7 @@ long test_vfirst_m_b32(vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b64(vbool64_t op1, size_t vl) { - return vfirst_m_b64(op1, vl); + return __riscv_vfirst_m_b64(op1, vl); } // CHECK-RV64-LABEL: @test_vfirst_m_b1_m( @@ -75,7 +75,7 @@ long test_vfirst_m_b64(vbool64_t op1, 
size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return vfirst_m_b1_m(mask, op1, vl); + return __riscv_vfirst_m_b1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfirst_m_b2_m( @@ -84,7 +84,7 @@ long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return vfirst_m_b2_m(mask, op1, vl); + return __riscv_vfirst_m_b2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfirst_m_b4_m( @@ -93,7 +93,7 @@ long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return vfirst_m_b4_m(mask, op1, vl); + return __riscv_vfirst_m_b4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfirst_m_b8_m( @@ -102,7 +102,7 @@ long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return vfirst_m_b8_m(mask, op1, vl); + return __riscv_vfirst_m_b8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfirst_m_b16_m( @@ -111,7 +111,7 @@ long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return vfirst_m_b16_m(mask, op1, vl); + return __riscv_vfirst_m_b16_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfirst_m_b32_m( @@ -120,7 +120,7 @@ long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return vfirst_m_b32_m(mask, op1, vl); + return __riscv_vfirst_m_b32_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfirst_m_b64_m( @@ -129,6 +129,6 @@ long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // 
long test_vfirst_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return vfirst_m_b64_m(mask, op1, vl); + return __riscv_vfirst_m_b64_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c index 299477907921..6047c3d9f7fd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmacc.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmacc_vv_f16mf4(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmacc_vf_f16mf4(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmacc_vv_f16mf2(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmacc_vf_f16mf2(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16mf2(vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmacc_vv_f16m1(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmacc_vf_f16m1(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmacc_vv_f16m2(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmacc_vf_f16m2(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmacc_vv_f16m4(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t 
vs1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmacc_vf_f16m4(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmacc_vv_f16m8(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmacc_vf_f16m8(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmacc_vv_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmacc_vf_f32mf2(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t vd, 
vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmacc_vv_f32m1(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmacc_vf_f32m1(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmacc_vv_f32m2(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmacc_vf_f32m2(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmacc_vv_f32m4(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmacc_vf_f32m4(vd, rs1, vs2, vl); + return 
__riscv_vfmacc_vf_f32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmacc_vv_f32m8(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmacc_vf_f32m8(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmacc_vv_f64m1(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmacc_vf_f64m1(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmacc_vv_f64m2(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2( @@ -238,7 +238,7 @@ 
vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmacc_vf_f64m2(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmacc_vv_f64m4(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmacc_vf_f64m4(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmacc_vv_f64m8(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmacc_vf_f64m8(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16mf4_t test_vfmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmacc_vv_f16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmacc_vf_f16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmacc_vv_f16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmacc_vf_f16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmacc_vv_f16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t 
test_vfmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmacc_vf_f16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmacc_vv_f16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmacc_vf_f16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmacc_vv_f16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmacc_vf_f16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m4_m(mask, vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmacc_vv_f16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmacc_vf_f16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmacc_vf_f32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmacc_vv_f32m1_m(mask, vd, 
vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmacc_vf_f32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmacc_vf_f32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, 
size_t vl) { - return vfmacc_vf_f32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmacc_vf_f32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmacc_vv_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmacc_vf_f64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmacc_vv_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmacc_vf_f64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmacc_vv_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmacc_vf_f64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, 
vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmacc_vf_f64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c index 0dbaeb4e1dda..5116621ea83d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmadd.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmadd_vv_f16mf4(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmadd_vf_f16mf4(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmadd_vv_f16mf2(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) 
{ - return vfmadd_vf_f16mf2(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmadd_vv_f16m1(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmadd_vf_f16m1(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmadd_vv_f16m2(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmadd_vf_f16m2(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmadd_vv_f16m4(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfmadd_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmadd_vf_f16m4(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmadd_vv_f16m8(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmadd_vf_f16m8(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmadd_vv_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmadd_vf_f32mf2(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t vd, float 
rs1, vfloat32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmadd_vv_f32m1(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmadd_vf_f32m1(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmadd_vv_f32m2(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmadd_vf_f32m2(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmadd_vv_f32m4(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t vd, float rs1, 
vfloat32m4_t vs2, size_t vl) { - return vfmadd_vf_f32m4(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmadd_vv_f32m8(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmadd_vf_f32m8(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmadd_vv_f64m1(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmadd_vf_f64m1(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmadd_vv_f64m2(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m2(vd, vs1, 
vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmadd_vf_f64m2(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmadd_vv_f64m4(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmadd_vf_f64m4(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmadd_vv_f64m8(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmadd_vf_f64m8(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t 
test_vfmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmadd_vv_f16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmadd_vf_f16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmadd_vv_f16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmadd_vf_f16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmadd_vv_f16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m1_m(mask, vd, 
vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmadd_vf_f16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmadd_vv_f16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmadd_vf_f16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmadd_vv_f16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmadd_vf_f16m4_m(mask, vd, 
rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmadd_vv_f16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmadd_vf_f16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmadd_vv_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmadd_vf_f32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, 
vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmadd_vv_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmadd_vf_f32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmadd_vv_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmadd_vf_f32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmadd_vv_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmadd_vf_f32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmadd_vv_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmadd_vf_f32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmadd_vv_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmadd_vf_f64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t 
vd, double rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmadd_vv_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmadd_vf_f64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmadd_vv_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmadd_vf_f64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmadd_vv_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m( @@ -544,6 +544,6 @@ 
vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmadd_vf_f64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax.c index 5e6a7780c3d2..8300813b6314 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmax.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmax_vv_f16mf4(op1, op2, vl); + return __riscv_vfmax_vv_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf4(op1, op2, vl); + return __riscv_vfmax_vf_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmax_vv_f16mf2(op1, op2, vl); + return __riscv_vfmax_vv_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf2(op1, op2, vl); + 
return __riscv_vfmax_vf_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmax_vv_f16m1(op1, op2, vl); + return __riscv_vfmax_vv_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmax_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m1(op1, op2, vl); + return __riscv_vfmax_vf_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfmax_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmax_vv_f16m2(op1, op2, vl); + return __riscv_vfmax_vv_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfmax_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m2(op1, op2, vl); + return __riscv_vfmax_vf_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmax_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmax_vv_f16m4(op1, op2, vl); + return __riscv_vfmax_vv_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmax_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vf_f16m4(vfloat16m4_t op1, 
_Float16 op2, size_t vl) { - return vfmax_vf_f16m4(op1, op2, vl); + return __riscv_vfmax_vf_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfmax_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmax_vv_f16m8(op1, op2, vl); + return __riscv_vfmax_vv_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmax_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m8(op1, op2, vl); + return __riscv_vfmax_vf_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmax_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmax_vv_f32mf2(op1, op2, vl); + return __riscv_vfmax_vv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { - return vfmax_vf_f32mf2(op1, op2, vl); + return __riscv_vfmax_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmax_vv_f32m1(op1, op2, vl); + return __riscv_vfmax_vv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { - return vfmax_vf_f32m1(op1, op2, vl); + return __riscv_vfmax_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmax_vv_f32m2(op1, op2, vl); + return __riscv_vfmax_vv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { - return vfmax_vf_f32m2(op1, op2, vl); + return __riscv_vfmax_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmax_vv_f32m4(op1, op2, vl); + return __riscv_vfmax_vv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { - return vfmax_vf_f32m4(op1, op2, vl); + return __riscv_vfmax_vf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmax_vv_f32m8(op1, op2, vl); + return __riscv_vfmax_vv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t 
test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { - return vfmax_vf_f32m8(op1, op2, vl); + return __riscv_vfmax_vf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmax_vv_f64m1(op1, op2, vl); + return __riscv_vfmax_vv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { - return vfmax_vf_f64m1(op1, op2, vl); + return __riscv_vfmax_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmax_vv_f64m2(op1, op2, vl); + return __riscv_vfmax_vv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { - return vfmax_vf_f64m2(op1, op2, vl); + return __riscv_vfmax_vf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmax_vv_f64m4(op1, op2, vl); + return __riscv_vfmax_vv_f64m4(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfmax_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { - return vfmax_vf_f64m4(op1, op2, vl); + return __riscv_vfmax_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmax_vv_f64m8(op1, op2, vl); + return __riscv_vfmax_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { - return vfmax_vf_f64m8(op1, op2, vl); + return __riscv_vfmax_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmax_vv_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmax_vv_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmax_vv_f16m1_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m1_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmax_vv_f16m2_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t 
op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m2_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmax_vv_f16m4_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m4_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmax_vv_f16m8_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m8_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmax_vv_f32mf2_m(mask, op1, op2, 
vl); + return __riscv_vfmax_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmax_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmax_vv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfmax_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmax_vv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfmax_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfmax_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmax_vv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfmax_vf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmax_vv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfmax_vf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmax_vv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t 
mask, vfloat64m1_t op1, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vfmax_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmax_vv_f64m2_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vfmax_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmax_vv_f64m4_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfmax_vf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmax_vv_f64m8_m(mask, op1, op2, vl); + return __riscv_vfmax_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfmax_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfmax_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge.c index 29de4c140532..bd6a3d396c8b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmerge_vfm_f16mf4(vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) { - return vfmerge_vfm_f16mf4(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmerge_vfm_f16mf4(vfloat16mf4_t op1, _Float16 op2, vbool64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmerge_vfm_f16mf2(vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) { - return vfmerge_vfm_f16mf2(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfmerge_vfm_f16mf2(vfloat16mf2_t op1, _Float16 op2, vbool32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmerge_vfm_f16m1(vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) { - return vfmerge_vfm_f16m1(op1, op2, mask, vl); + return 
__riscv_vfmerge_vfm_f16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfmerge_vfm_f16m1(vfloat16m1_t op1, _Float16 op2, vbool16_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmerge_vfm_f16m2(vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) { - return vfmerge_vfm_f16m2(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfmerge_vfm_f16m2(vfloat16m2_t op1, _Float16 op2, vbool8_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmerge_vfm_f16m4(vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) { - return vfmerge_vfm_f16m4(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfmerge_vfm_f16m4(vfloat16m4_t op1, _Float16 op2, vbool4_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmerge_vfm_f16m8(vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) { - return vfmerge_vfm_f16m8(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfmerge_vfm_f16m8(vfloat16m8_t op1, _Float16 op2, vbool2_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmerge_vfm_f32mf2(vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) { - return vfmerge_vfm_f32mf2(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2(vfloat32mf2_t op1, float op2, vbool64_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmerge_vfm_f32m1(vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) { - return vfmerge_vfm_f32m1(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: 
@test_vfmerge_vfm_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfmerge_vfm_f32m1(vfloat32m1_t op1, float op2, vbool32_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmerge_vfm_f32m2(vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) { - return vfmerge_vfm_f32m2(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfmerge_vfm_f32m2(vfloat32m2_t op1, float op2, vbool16_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmerge_vfm_f32m4(vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) { - return vfmerge_vfm_f32m4(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfmerge_vfm_f32m4(vfloat32m4_t op1, float op2, vbool8_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmerge_vfm_f32m8(vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) { - return vfmerge_vfm_f32m8(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfmerge_vfm_f32m8(vfloat32m8_t op1, float op2, vbool4_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmerge_vfm_f64m1(vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) { - return vfmerge_vfm_f64m1(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfmerge_vfm_f64m1(vfloat64m1_t op1, double op2, vbool64_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmerge_vfm_f64m2(vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) { - return vfmerge_vfm_f64m2(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t 
test_vfmerge_vfm_f64m2(vfloat64m2_t op1, double op2, vbool32_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmerge_vfm_f64m4(vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) { - return vfmerge_vfm_f64m4(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8( @@ -139,6 +139,6 @@ vfloat64m4_t test_vfmerge_vfm_f64m4(vfloat64m4_t op1, double op2, vbool16_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmerge_vfm_f64m8(vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) { - return vfmerge_vfm_f64m8(op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f64m8(op1, op2, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin.c index 5ac0ce2a597e..14cd2528b56a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmin.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmin_vv_f16mf4(op1, op2, vl); + return __riscv_vfmin_vv_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf4(op1, op2, vl); + return __riscv_vfmin_vf_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmin_vv_f16mf2(op1, op2, vl); + return 
__riscv_vfmin_vv_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf2(op1, op2, vl); + return __riscv_vfmin_vf_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmin_vv_f16m1(op1, op2, vl); + return __riscv_vfmin_vv_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmin_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m1(op1, op2, vl); + return __riscv_vfmin_vf_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfmin_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmin_vv_f16m2(op1, op2, vl); + return __riscv_vfmin_vv_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfmin_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m2(op1, op2, vl); + return __riscv_vfmin_vf_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmin_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vv_f16m4(vfloat16m4_t op1, 
vfloat16m4_t op2, size_t vl) { - return vfmin_vv_f16m4(op1, op2, vl); + return __riscv_vfmin_vv_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmin_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m4(op1, op2, vl); + return __riscv_vfmin_vf_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfmin_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmin_vv_f16m8(op1, op2, vl); + return __riscv_vfmin_vv_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmin_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m8(op1, op2, vl); + return __riscv_vfmin_vf_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmin_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmin_vv_f32mf2(op1, op2, vl); + return __riscv_vfmin_vv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { - return vfmin_vf_f32mf2(op1, op2, vl); + return __riscv_vfmin_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmin_vv_f32m1(op1, op2, vl); + return __riscv_vfmin_vv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { - return vfmin_vf_f32m1(op1, op2, vl); + return __riscv_vfmin_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmin_vv_f32m2(op1, op2, vl); + return __riscv_vfmin_vv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { - return vfmin_vf_f32m2(op1, op2, vl); + return __riscv_vfmin_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmin_vv_f32m4(op1, op2, vl); + return __riscv_vfmin_vv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { - return vfmin_vf_f32m4(op1, op2, vl); + return __riscv_vfmin_vf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t 
test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmin_vv_f32m8(op1, op2, vl); + return __riscv_vfmin_vv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { - return vfmin_vf_f32m8(op1, op2, vl); + return __riscv_vfmin_vf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmin_vv_f64m1(op1, op2, vl); + return __riscv_vfmin_vv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { - return vfmin_vf_f64m1(op1, op2, vl); + return __riscv_vfmin_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmin_vv_f64m2(op1, op2, vl); + return __riscv_vfmin_vv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { - return vfmin_vf_f64m2(op1, op2, vl); + return __riscv_vfmin_vf_f64m2(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfmin_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmin_vv_f64m4(op1, op2, vl); + return __riscv_vfmin_vv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { - return vfmin_vf_f64m4(op1, op2, vl); + return __riscv_vfmin_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmin_vv_f64m8(op1, op2, vl); + return __riscv_vfmin_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { - return vfmin_vf_f64m8(op1, op2, vl); + return __riscv_vfmin_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmin_vv_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, 
vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmin_vv_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmin_vv_f16m1_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m1_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return 
vfmin_vv_f16m2_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m2_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmin_vv_f16m4_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m4_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmin_vv_f16m8_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m8_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f16m8_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmin_vv_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmin_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmin_vv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfmin_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmin_vv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m( @@ -436,7 
+436,7 @@ vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfmin_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmin_vv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfmin_vf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmin_vv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfmin_vf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmin_vv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vfmin_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmin_vv_f64m2_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vfmin_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmin_vv_f64m4_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t 
mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfmin_vf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmin_vv_f64m8_m(mask, op1, op2, vl); + return __riscv_vfmin_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfmin_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfmin_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c index 37841fde294e..e69490af7e33 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsac.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsac_vv_f16mf4(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsac_vf_f16mf4(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfmsac_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsac_vv_f16mf2(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsac_vf_f16mf2(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmsac_vv_f16m1(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmsac_vf_f16m1(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmsac_vv_f16m2(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t 
vs1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmsac_vf_f16m2(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmsac_vv_f16m4(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmsac_vf_f16m4(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmsac_vv_f16m8(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmsac_vf_f16m8(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t vd, 
vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsac_vv_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsac_vf_f32mf2(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmsac_vv_f32m1(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmsac_vf_f32m1(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmsac_vv_f32m2(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmsac_vf_f32m2(vd, rs1, vs2, vl); + return 
__riscv_vfmsac_vf_f32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmsac_vv_f32m4(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmsac_vf_f32m4(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmsac_vv_f32m8(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmsac_vf_f32m8(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmsac_vv_f64m1(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t 
test_vfmsac_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmsac_vf_f64m1(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmsac_vv_f64m2(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmsac_vf_f64m2(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmsac_vv_f64m4(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmsac_vf_f64m4(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vfmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmsac_vv_f64m8(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmsac_vf_f64m8(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsac_vv_f16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsac_vf_f16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsac_vv_f16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsac_vf_f16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmsac_vv_f16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmsac_vf_f16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmsac_vv_f16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmsac_vf_f16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t 
test_vfmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmsac_vv_f16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmsac_vf_f16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmsac_vv_f16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmsac_vf_f16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsac_vf_f32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmsac_vf_f32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmsac_vf_f32m2_m(mask, vd, rs1, vs2, vl); + 
return __riscv_vfmsac_vf_f32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmsac_vf_f32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmsac_vf_f32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) 
{ - return vfmsac_vv_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmsac_vf_f64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmsac_vv_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmsac_vf_f64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmsac_vv_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, 
vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmsac_vf_f64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmsac_vf_f64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c index b69ddc41e714..c09594fdb579 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmsub.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsub_vv_f16mf4(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsub_vf_f16mf4(vd, rs1, vs2, 
vl); + return __riscv_vfmsub_vf_f16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsub_vv_f16mf2(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsub_vf_f16mf2(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmsub_vv_f16m1(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmsub_vf_f16m1(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmsub_vv_f16m2(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2( @@ 
-76,7 +76,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmsub_vf_f16m2(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmsub_vv_f16m4(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmsub_vf_f16m4(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmsub_vv_f16m8(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmsub_vf_f16m8(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsub_vv_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsub_vf_f32mf2(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmsub_vv_f32m1(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmsub_vf_f32m1(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmsub_vv_f32m2(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { 
- return vfmsub_vf_f32m2(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmsub_vv_f32m4(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmsub_vf_f32m4(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmsub_vv_f32m8(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmsub_vf_f32m8(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmsub_vv_f64m1(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfmsub_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmsub_vf_f64m1(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmsub_vv_f64m2(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmsub_vf_f64m2(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmsub_vv_f64m4(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmsub_vf_f64m4(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t 
vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmsub_vv_f64m8(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmsub_vf_f64m8(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsub_vv_f16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsub_vf_f16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsub_vv_f16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2_m(vbool32_t mask, 
vfloat16mf2_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsub_vf_f16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmsub_vv_f16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmsub_vf_f16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmsub_vv_f16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmsub_vf_f16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_m( @@ 
-355,7 +355,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmsub_vv_f16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmsub_vf_f16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmsub_vv_f16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmsub_vf_f16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsub_vv_f32mf2_m(mask, vd, vs1, vs2, vl); + return 
__riscv_vfmsub_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsub_vf_f32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmsub_vv_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmsub_vf_f32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmsub_vv_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { 
- return vfmsub_vf_f32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmsub_vv_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmsub_vf_f32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmsub_vv_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmsub_vf_f32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, 
vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmsub_vv_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmsub_vf_f64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmsub_vv_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmsub_vf_f64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmsub_vv_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_ // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmsub_vf_f64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmsub_vv_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmsub_vf_f64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul.c index f848d54d04fd..c9150b36277d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmul.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmul_vv_f16mf4(op1, op2, vl); + return __riscv_vfmul_vv_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf4(op1, 
op2, vl); + return __riscv_vfmul_vf_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmul_vv_f16mf2(op1, op2, vl); + return __riscv_vfmul_vv_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf2(op1, op2, vl); + return __riscv_vfmul_vf_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmul_vv_f16m1(op1, op2, vl); + return __riscv_vfmul_vv_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmul_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m1(op1, op2, vl); + return __riscv_vfmul_vf_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfmul_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmul_vv_f16m2(op1, op2, vl); + return __riscv_vfmul_vv_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfmul_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t 
test_vfmul_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m2(op1, op2, vl); + return __riscv_vfmul_vf_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmul_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmul_vv_f16m4(op1, op2, vl); + return __riscv_vfmul_vv_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmul_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m4(op1, op2, vl); + return __riscv_vfmul_vf_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfmul_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmul_vv_f16m8(op1, op2, vl); + return __riscv_vfmul_vv_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmul_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m8(op1, op2, vl); + return __riscv_vfmul_vf_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmul_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmul_vv_f32mf2(op1, op2, vl); + return __riscv_vfmul_vv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t 
op1, vfloat32mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { - return vfmul_vf_f32mf2(op1, op2, vl); + return __riscv_vfmul_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmul_vv_f32m1(op1, op2, vl); + return __riscv_vfmul_vv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { - return vfmul_vf_f32m1(op1, op2, vl); + return __riscv_vfmul_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmul_vv_f32m2(op1, op2, vl); + return __riscv_vfmul_vv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { - return vfmul_vf_f32m2(op1, op2, vl); + return __riscv_vfmul_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmul_vv_f32m4(op1, op2, vl); + return __riscv_vfmul_vv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4( @@ 
-184,7 +184,7 @@ vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { - return vfmul_vf_f32m4(op1, op2, vl); + return __riscv_vfmul_vf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmul_vv_f32m8(op1, op2, vl); + return __riscv_vfmul_vv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { - return vfmul_vf_f32m8(op1, op2, vl); + return __riscv_vfmul_vf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmul_vv_f64m1(op1, op2, vl); + return __riscv_vfmul_vv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { - return vfmul_vf_f64m1(op1, op2, vl); + return __riscv_vfmul_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmul_vv_f64m2(op1, op2, vl); + return 
__riscv_vfmul_vv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { - return vfmul_vf_f64m2(op1, op2, vl); + return __riscv_vfmul_vf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmul_vv_f64m4(op1, op2, vl); + return __riscv_vfmul_vv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { - return vfmul_vf_f64m4(op1, op2, vl); + return __riscv_vfmul_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmul_vv_f64m8(op1, op2, vl); + return __riscv_vfmul_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { - return vfmul_vf_f64m8(op1, op2, vl); + return __riscv_vfmul_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, 
vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmul_vv_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmul_vv_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmul_vv_f16m1_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return 
vfmul_vf_f16m1_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmul_vv_f16m2_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m2_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmul_vv_f16m4_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m4_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmul_vv_f16m8_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f16m8_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m8_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmul_vv_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmul_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmul_vv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfmul_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m( @@ -427,7 +427,7 
@@ vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmul_vv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfmul_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmul_vv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfmul_vf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmul_vv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfmul_vf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmul_vv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vfmul_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmul_vv_f64m2_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vfmul_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, 
vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmul_vv_f64m4_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfmul_vf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmul_vv_f64m8_m(mask, op1, op2, vl); + return __riscv_vfmul_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfmul_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfmul_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv.c index b405a2526dad..6afa157e1acf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmv_v_f_f16mf4(_Float16 src, size_t vl) { - return vfmv_v_f_f16mf4(src, vl); + return __riscv_vfmv_v_f_f16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t 
test_vfmv_v_f_f16mf4(_Float16 src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmv_v_f_f16mf2(_Float16 src, size_t vl) { - return vfmv_v_f_f16mf2(src, vl); + return __riscv_vfmv_v_f_f16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfmv_v_f_f16mf2(_Float16 src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmv_v_f_f16m1(_Float16 src, size_t vl) { - return vfmv_v_f_f16m1(src, vl); + return __riscv_vfmv_v_f_f16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfmv_v_f_f16m1(_Float16 src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmv_v_f_f16m2(_Float16 src, size_t vl) { - return vfmv_v_f_f16m2(src, vl); + return __riscv_vfmv_v_f_f16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfmv_v_f_f16m2(_Float16 src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmv_v_f_f16m4(_Float16 src, size_t vl) { - return vfmv_v_f_f16m4(src, vl); + return __riscv_vfmv_v_f_f16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfmv_v_f_f16m4(_Float16 src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmv_v_f_f16m8(_Float16 src, size_t vl) { - return vfmv_v_f_f16m8(src, vl); + return __riscv_vfmv_v_f_f16m8(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfmv_v_f_f16m8(_Float16 src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmv_v_f_f32mf2(float src, size_t vl) { - return vfmv_v_f_f32mf2(src, vl); + return __riscv_vfmv_v_f_f32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfmv_v_f_f32mf2(float src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmv_v_f_f32m1(float src, size_t vl) { - return vfmv_v_f_f32m1(src, vl); + return __riscv_vfmv_v_f_f32m1(src, vl); } // 
CHECK-RV64-LABEL: @test_vfmv_v_f_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfmv_v_f_f32m1(float src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmv_v_f_f32m2(float src, size_t vl) { - return vfmv_v_f_f32m2(src, vl); + return __riscv_vfmv_v_f_f32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfmv_v_f_f32m2(float src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmv_v_f_f32m4(float src, size_t vl) { - return vfmv_v_f_f32m4(src, vl); + return __riscv_vfmv_v_f_f32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfmv_v_f_f32m4(float src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmv_v_f_f32m8(float src, size_t vl) { - return vfmv_v_f_f32m8(src, vl); + return __riscv_vfmv_v_f_f32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfmv_v_f_f32m8(float src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmv_v_f_f64m1(double src, size_t vl) { - return vfmv_v_f_f64m1(src, vl); + return __riscv_vfmv_v_f_f64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfmv_v_f_f64m1(double src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmv_v_f_f64m2(double src, size_t vl) { - return vfmv_v_f_f64m2(src, vl); + return __riscv_vfmv_v_f_f64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfmv_v_f_f64m2(double src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmv_v_f_f64m4(double src, size_t vl) { - return vfmv_v_f_f64m4(src, vl); + return __riscv_vfmv_v_f_f64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfmv_v_f_f64m4(double src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmv_v_f_f64m8(double src, size_t vl) { - return vfmv_v_f_f64m8(src, vl); + 
return __riscv_vfmv_v_f_f64m8(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf4_f16( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfmv_v_f_f64m8(double src, size_t vl) { // CHECK-RV64-NEXT: ret half [[TMP0]] // _Float16 test_vfmv_f_s_f16mf4_f16(vfloat16mf4_t src) { - return vfmv_f_s_f16mf4_f16(src); + return __riscv_vfmv_f_s_f16mf4_f16(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4( @@ -157,7 +157,7 @@ _Float16 test_vfmv_f_s_f16mf4_f16(vfloat16mf4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmv_s_f_f16mf4(_Float16 src, size_t vl) { - return vfmv_s_f_f16mf4(src, vl); + return __riscv_vfmv_s_f_f16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf2_f16( @@ -166,7 +166,7 @@ vfloat16mf4_t test_vfmv_s_f_f16mf4(_Float16 src, size_t vl) { // CHECK-RV64-NEXT: ret half [[TMP0]] // _Float16 test_vfmv_f_s_f16mf2_f16(vfloat16mf2_t src) { - return vfmv_f_s_f16mf2_f16(src); + return __riscv_vfmv_f_s_f16mf2_f16(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2( @@ -175,7 +175,7 @@ _Float16 test_vfmv_f_s_f16mf2_f16(vfloat16mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmv_s_f_f16mf2(_Float16 src, size_t vl) { - return vfmv_s_f_f16mf2(src, vl); + return __riscv_vfmv_s_f_f16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f16m1_f16( @@ -184,7 +184,7 @@ vfloat16mf2_t test_vfmv_s_f_f16mf2(_Float16 src, size_t vl) { // CHECK-RV64-NEXT: ret half [[TMP0]] // _Float16 test_vfmv_f_s_f16m1_f16(vfloat16m1_t src) { - return vfmv_f_s_f16m1_f16(src); + return __riscv_vfmv_f_s_f16m1_f16(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1( @@ -193,7 +193,7 @@ _Float16 test_vfmv_f_s_f16m1_f16(vfloat16m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmv_s_f_f16m1(_Float16 src, size_t vl) { - return vfmv_s_f_f16m1(src, vl); + return __riscv_vfmv_s_f_f16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f16m2_f16( @@ -202,7 +202,7 @@ vfloat16m1_t test_vfmv_s_f_f16m1(_Float16 src, size_t vl) { // CHECK-RV64-NEXT: ret half [[TMP0]] 
// _Float16 test_vfmv_f_s_f16m2_f16(vfloat16m2_t src) { - return vfmv_f_s_f16m2_f16(src); + return __riscv_vfmv_f_s_f16m2_f16(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2( @@ -211,7 +211,7 @@ _Float16 test_vfmv_f_s_f16m2_f16(vfloat16m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmv_s_f_f16m2(_Float16 src, size_t vl) { - return vfmv_s_f_f16m2(src, vl); + return __riscv_vfmv_s_f_f16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f16m4_f16( @@ -220,7 +220,7 @@ vfloat16m2_t test_vfmv_s_f_f16m2(_Float16 src, size_t vl) { // CHECK-RV64-NEXT: ret half [[TMP0]] // _Float16 test_vfmv_f_s_f16m4_f16(vfloat16m4_t src) { - return vfmv_f_s_f16m4_f16(src); + return __riscv_vfmv_f_s_f16m4_f16(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4( @@ -229,7 +229,7 @@ _Float16 test_vfmv_f_s_f16m4_f16(vfloat16m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmv_s_f_f16m4(_Float16 src, size_t vl) { - return vfmv_s_f_f16m4(src, vl); + return __riscv_vfmv_s_f_f16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f16m8_f16( @@ -238,7 +238,7 @@ vfloat16m4_t test_vfmv_s_f_f16m4(_Float16 src, size_t vl) { // CHECK-RV64-NEXT: ret half [[TMP0]] // _Float16 test_vfmv_f_s_f16m8_f16(vfloat16m8_t src) { - return vfmv_f_s_f16m8_f16(src); + return __riscv_vfmv_f_s_f16m8_f16(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8( @@ -247,7 +247,7 @@ _Float16 test_vfmv_f_s_f16m8_f16(vfloat16m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmv_s_f_f16m8(_Float16 src, size_t vl) { - return vfmv_s_f_f16m8(src, vl); + return __riscv_vfmv_s_f_f16m8(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f32mf2_f32( @@ -256,7 +256,7 @@ vfloat16m8_t test_vfmv_s_f_f16m8(_Float16 src, size_t vl) { // CHECK-RV64-NEXT: ret float [[TMP0]] // float test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) { - return vfmv_f_s_f32mf2_f32(src); + return __riscv_vfmv_f_s_f32mf2_f32(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2( @@ -265,7 +265,7 @@ float 
test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmv_s_f_f32mf2(float src, size_t vl) { - return vfmv_s_f_f32mf2(src, vl); + return __riscv_vfmv_s_f_f32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m1_f32( @@ -274,7 +274,7 @@ vfloat32mf2_t test_vfmv_s_f_f32mf2(float src, size_t vl) { // CHECK-RV64-NEXT: ret float [[TMP0]] // float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) { - return vfmv_f_s_f32m1_f32(src); + return __riscv_vfmv_f_s_f32m1_f32(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1( @@ -283,7 +283,7 @@ float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmv_s_f_f32m1(float src, size_t vl) { - return vfmv_s_f_f32m1(src, vl); + return __riscv_vfmv_s_f_f32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m2_f32( @@ -292,7 +292,7 @@ vfloat32m1_t test_vfmv_s_f_f32m1(float src, size_t vl) { // CHECK-RV64-NEXT: ret float [[TMP0]] // float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) { - return vfmv_f_s_f32m2_f32(src); + return __riscv_vfmv_f_s_f32m2_f32(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2( @@ -301,7 +301,7 @@ float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmv_s_f_f32m2(float src, size_t vl) { - return vfmv_s_f_f32m2(src, vl); + return __riscv_vfmv_s_f_f32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m4_f32( @@ -310,7 +310,7 @@ vfloat32m2_t test_vfmv_s_f_f32m2(float src, size_t vl) { // CHECK-RV64-NEXT: ret float [[TMP0]] // float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) { - return vfmv_f_s_f32m4_f32(src); + return __riscv_vfmv_f_s_f32m4_f32(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4( @@ -319,7 +319,7 @@ float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmv_s_f_f32m4(float src, size_t vl) { - return vfmv_s_f_f32m4(src, vl); + return __riscv_vfmv_s_f_f32m4(src, vl); } // CHECK-RV64-LABEL: 
@test_vfmv_f_s_f32m8_f32( @@ -328,7 +328,7 @@ vfloat32m4_t test_vfmv_s_f_f32m4(float src, size_t vl) { // CHECK-RV64-NEXT: ret float [[TMP0]] // float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) { - return vfmv_f_s_f32m8_f32(src); + return __riscv_vfmv_f_s_f32m8_f32(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8( @@ -337,7 +337,7 @@ float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmv_s_f_f32m8(float src, size_t vl) { - return vfmv_s_f_f32m8(src, vl); + return __riscv_vfmv_s_f_f32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m1_f64( @@ -346,7 +346,7 @@ vfloat32m8_t test_vfmv_s_f_f32m8(float src, size_t vl) { // CHECK-RV64-NEXT: ret double [[TMP0]] // double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) { - return vfmv_f_s_f64m1_f64(src); + return __riscv_vfmv_f_s_f64m1_f64(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1( @@ -355,7 +355,7 @@ double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmv_s_f_f64m1(double src, size_t vl) { - return vfmv_s_f_f64m1(src, vl); + return __riscv_vfmv_s_f_f64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m2_f64( @@ -364,7 +364,7 @@ vfloat64m1_t test_vfmv_s_f_f64m1(double src, size_t vl) { // CHECK-RV64-NEXT: ret double [[TMP0]] // double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) { - return vfmv_f_s_f64m2_f64(src); + return __riscv_vfmv_f_s_f64m2_f64(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2( @@ -373,7 +373,7 @@ double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmv_s_f_f64m2(double src, size_t vl) { - return vfmv_s_f_f64m2(src, vl); + return __riscv_vfmv_s_f_f64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m4_f64( @@ -382,7 +382,7 @@ vfloat64m2_t test_vfmv_s_f_f64m2(double src, size_t vl) { // CHECK-RV64-NEXT: ret double [[TMP0]] // double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) { - return vfmv_f_s_f64m4_f64(src); + return 
__riscv_vfmv_f_s_f64m4_f64(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4( @@ -391,7 +391,7 @@ double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmv_s_f_f64m4(double src, size_t vl) { - return vfmv_s_f_f64m4(src, vl); + return __riscv_vfmv_s_f_f64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m8_f64( @@ -400,7 +400,7 @@ vfloat64m4_t test_vfmv_s_f_f64m4(double src, size_t vl) { // CHECK-RV64-NEXT: ret double [[TMP0]] // double test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) { - return vfmv_f_s_f64m8_f64(src); + return __riscv_vfmv_f_s_f64m8_f64(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8( @@ -409,6 +409,6 @@ double test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmv_s_f_f64m8(double src, size_t vl) { - return vfmv_s_f_f64m8(src, vl); + return __riscv_vfmv_s_f_f64m8(src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c index 0dee74313632..f2015e53ef8e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vfncvt_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) { - return vfncvt_x_f_w_i8mf8(src, vl); + return __riscv_vfncvt_x_f_w_i8mf8(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8( @@ -22,7 +22,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf8(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf8(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4( @@ -31,7 +31,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) { - return vfncvt_x_f_w_i8mf4(src, vl); + return __riscv_vfncvt_x_f_w_i8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4( @@ -40,7 +40,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf4(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2( @@ -49,7 +49,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vfncvt_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) { - return vfncvt_x_f_w_i8mf2(src, vl); + return __riscv_vfncvt_x_f_w_i8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2( @@ -58,7 +58,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf2(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1( @@ -67,7 +67,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vfncvt_x_f_w_i8m1(vfloat16m2_t src, size_t vl) { - return vfncvt_x_f_w_i8m1(src, vl); + return __riscv_vfncvt_x_f_w_i8m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1( @@ -76,7 +76,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1(vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m1(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2( @@ -85,7 +85,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m2_t test_vfncvt_x_f_w_i8m2(vfloat16m4_t src, size_t vl) { - return vfncvt_x_f_w_i8m2(src, vl); + return __riscv_vfncvt_x_f_w_i8m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2( @@ -94,7 +94,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m2(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4( @@ -103,7 +103,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_x_f_w_i8m4(vfloat16m8_t src, size_t vl) { - return vfncvt_x_f_w_i8m4(src, vl); + return __riscv_vfncvt_x_f_w_i8m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4( @@ -112,7 +112,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4(vfloat16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m4(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8( @@ -121,7 +121,7 @@ vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vfncvt_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf8(src, vl); + return __riscv_vfncvt_xu_f_w_u8mf8(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8( @@ -130,7 +130,7 @@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf8(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf8(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4( @@ -139,7 +139,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vfncvt_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf4(src, vl); + return __riscv_vfncvt_xu_f_w_u8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4( @@ -148,7 +148,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf4(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2( @@ -157,7 +157,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vfncvt_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf2(src, vl); + return __riscv_vfncvt_xu_f_w_u8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2( @@ -166,7 +166,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf2(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1( @@ -175,7 +175,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) { - return vfncvt_xu_f_w_u8m1(src, vl); + return __riscv_vfncvt_xu_f_w_u8m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1( @@ -184,7 +184,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m1(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2( @@ -193,7 +193,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m2_t test_vfncvt_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) { - return vfncvt_xu_f_w_u8m2(src, vl); + return __riscv_vfncvt_xu_f_w_u8m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2( @@ -202,7 +202,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m2(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4( @@ -211,7 +211,7 @@ vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vfncvt_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) { - return vfncvt_xu_f_w_u8m4(src, vl); + return __riscv_vfncvt_xu_f_w_u8m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4( @@ -220,7 +220,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m4(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4( @@ -229,7 +229,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { - return vfncvt_x_f_w_i16mf4(src, vl); + return __riscv_vfncvt_x_f_w_i16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4( @@ -238,7 +238,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf4(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2( @@ -247,7 +247,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { - return vfncvt_x_f_w_i16mf2(src, vl); + return __riscv_vfncvt_x_f_w_i16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2( @@ -256,7 +256,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf2(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1( @@ -265,7 +265,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { - return vfncvt_x_f_w_i16m1(src, vl); + return __riscv_vfncvt_x_f_w_i16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1( @@ -274,7 +274,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m1(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2( @@ -283,7 +283,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { - return vfncvt_x_f_w_i16m2(src, vl); + return __riscv_vfncvt_x_f_w_i16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2( @@ -292,7 +292,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m2(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4( @@ -301,7 +301,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { - return vfncvt_x_f_w_i16m4(src, vl); + return __riscv_vfncvt_x_f_w_i16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4( @@ -310,7 +310,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m4(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4( @@ -319,7 +319,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf4(src, vl); + return __riscv_vfncvt_xu_f_w_u16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4( @@ -328,7 +328,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf4(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2( @@ -337,7 +337,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf2(src, vl); + return __riscv_vfncvt_xu_f_w_u16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2( @@ -346,7 +346,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf2(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1( @@ -355,7 +355,7 @@ vuint16mf2_t 
test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { - return vfncvt_xu_f_w_u16m1(src, vl); + return __riscv_vfncvt_xu_f_w_u16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1( @@ -364,7 +364,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m1(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2( @@ -373,7 +373,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { - return vfncvt_xu_f_w_u16m2(src, vl); + return __riscv_vfncvt_xu_f_w_u16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2( @@ -382,7 +382,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m2(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4( @@ -391,7 +391,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { - return vfncvt_xu_f_w_u16m4(src, vl); + return __riscv_vfncvt_xu_f_w_u16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4( @@ -400,7 +400,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m4(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4( @@ 
-409,7 +409,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_x_w_f16mf4(vint32mf2_t src, size_t vl) { - return vfncvt_f_x_w_f16mf4(src, vl); + return __riscv_vfncvt_f_x_w_f16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2( @@ -418,7 +418,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4(vint32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_x_w_f16mf2(vint32m1_t src, size_t vl) { - return vfncvt_f_x_w_f16mf2(src, vl); + return __riscv_vfncvt_f_x_w_f16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1( @@ -427,7 +427,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2(vint32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_x_w_f16m1(vint32m2_t src, size_t vl) { - return vfncvt_f_x_w_f16m1(src, vl); + return __riscv_vfncvt_f_x_w_f16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2( @@ -436,7 +436,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1(vint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_x_w_f16m2(vint32m4_t src, size_t vl) { - return vfncvt_f_x_w_f16m2(src, vl); + return __riscv_vfncvt_f_x_w_f16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4( @@ -445,7 +445,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2(vint32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_x_w_f16m4(vint32m8_t src, size_t vl) { - return vfncvt_f_x_w_f16m4(src, vl); + return __riscv_vfncvt_f_x_w_f16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4( @@ -454,7 +454,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4(vint32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4(vuint32mf2_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf4(src, vl); + return __riscv_vfncvt_f_xu_w_f16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2( @@ -463,7 +463,7 @@ vfloat16mf4_t 
test_vfncvt_f_xu_w_f16mf4(vuint32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2(vuint32m1_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf2(src, vl); + return __riscv_vfncvt_f_xu_w_f16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1( @@ -472,7 +472,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2(vuint32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_xu_w_f16m1(vuint32m2_t src, size_t vl) { - return vfncvt_f_xu_w_f16m1(src, vl); + return __riscv_vfncvt_f_xu_w_f16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2( @@ -481,7 +481,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1(vuint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_xu_w_f16m2(vuint32m4_t src, size_t vl) { - return vfncvt_f_xu_w_f16m2(src, vl); + return __riscv_vfncvt_f_xu_w_f16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4( @@ -490,7 +490,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2(vuint32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_xu_w_f16m4(vuint32m8_t src, size_t vl) { - return vfncvt_f_xu_w_f16m4(src, vl); + return __riscv_vfncvt_f_xu_w_f16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4( @@ -499,7 +499,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4(vuint32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) { - return vfncvt_f_f_w_f16mf4(src, vl); + return __riscv_vfncvt_f_f_w_f16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4( @@ -508,7 +508,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf4(src, vl); + return __riscv_vfncvt_rod_f_f_w_f16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2( @@ -517,7 +517,7 @@ 
vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) { - return vfncvt_f_f_w_f16mf2(src, vl); + return __riscv_vfncvt_f_f_w_f16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2( @@ -526,7 +526,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf2(src, vl); + return __riscv_vfncvt_rod_f_f_w_f16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1( @@ -535,7 +535,7 @@ vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t src, size_t vl) { - return vfncvt_f_f_w_f16m1(src, vl); + return __riscv_vfncvt_f_f_w_f16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1( @@ -544,7 +544,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m1(src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2( @@ -553,7 +553,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_f_w_f16m2(vfloat32m4_t src, size_t vl) { - return vfncvt_f_f_w_f16m2(src, vl); + return __riscv_vfncvt_f_f_w_f16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2( @@ -562,7 +562,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m2(src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m2(src, vl); } // CHECK-RV64-LABEL: 
@test_vfncvt_f_f_w_f16m4( @@ -571,7 +571,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_f_w_f16m4(vfloat32m8_t src, size_t vl) { - return vfncvt_f_f_w_f16m4(src, vl); + return __riscv_vfncvt_f_f_w_f16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4( @@ -580,7 +580,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4(vfloat32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m4(src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2( @@ -589,7 +589,7 @@ vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { - return vfncvt_x_f_w_i32mf2(src, vl); + return __riscv_vfncvt_x_f_w_i32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2( @@ -598,7 +598,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32mf2(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1( @@ -607,7 +607,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { - return vfncvt_x_f_w_i32m1(src, vl); + return __riscv_vfncvt_x_f_w_i32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1( @@ -616,7 +616,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m1(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m1(src, vl); } // 
CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2( @@ -625,7 +625,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { - return vfncvt_x_f_w_i32m2(src, vl); + return __riscv_vfncvt_x_f_w_i32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2( @@ -634,7 +634,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m2(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4( @@ -643,7 +643,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { - return vfncvt_x_f_w_i32m4(src, vl); + return __riscv_vfncvt_x_f_w_i32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4( @@ -652,7 +652,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m4(src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2( @@ -661,7 +661,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { - return vfncvt_xu_f_w_u32mf2(src, vl); + return __riscv_vfncvt_xu_f_w_u32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2( @@ -670,7 +670,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32mf2(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32mf2(src, 
vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1( @@ -679,7 +679,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { - return vfncvt_xu_f_w_u32m1(src, vl); + return __riscv_vfncvt_xu_f_w_u32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1( @@ -688,7 +688,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m1(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2( @@ -697,7 +697,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { - return vfncvt_xu_f_w_u32m2(src, vl); + return __riscv_vfncvt_xu_f_w_u32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2( @@ -706,7 +706,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m2(src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4( @@ -715,7 +715,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { - return vfncvt_xu_f_w_u32m4(src, vl); + return __riscv_vfncvt_xu_f_w_u32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4( @@ -724,7 +724,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m4(src, vl); + return 
__riscv_vfncvt_rtz_xu_f_w_u32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2( @@ -733,7 +733,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) { - return vfncvt_f_x_w_f32mf2(src, vl); + return __riscv_vfncvt_f_x_w_f32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1( @@ -742,7 +742,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) { - return vfncvt_f_x_w_f32m1(src, vl); + return __riscv_vfncvt_f_x_w_f32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2( @@ -751,7 +751,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) { - return vfncvt_f_x_w_f32m2(src, vl); + return __riscv_vfncvt_f_x_w_f32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4( @@ -760,7 +760,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) { - return vfncvt_f_x_w_f32m4(src, vl); + return __riscv_vfncvt_f_x_w_f32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2( @@ -769,7 +769,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) { - return vfncvt_f_xu_w_f32mf2(src, vl); + return __riscv_vfncvt_f_xu_w_f32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1( @@ -778,7 +778,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) { - return vfncvt_f_xu_w_f32m1(src, vl); + return __riscv_vfncvt_f_xu_w_f32m1(src, vl); 
} // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2( @@ -787,7 +787,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) { - return vfncvt_f_xu_w_f32m2(src, vl); + return __riscv_vfncvt_f_xu_w_f32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4( @@ -796,7 +796,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) { - return vfncvt_f_xu_w_f32m4(src, vl); + return __riscv_vfncvt_f_xu_w_f32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2( @@ -805,7 +805,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { - return vfncvt_f_f_w_f32mf2(src, vl); + return __riscv_vfncvt_f_f_w_f32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2( @@ -814,7 +814,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32mf2(src, vl); + return __riscv_vfncvt_rod_f_f_w_f32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1( @@ -823,7 +823,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { - return vfncvt_f_f_w_f32m1(src, vl); + return __riscv_vfncvt_f_f_w_f32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1( @@ -832,7 +832,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m1(src, vl); + return 
__riscv_vfncvt_rod_f_f_w_f32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2( @@ -841,7 +841,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { - return vfncvt_f_f_w_f32m2(src, vl); + return __riscv_vfncvt_f_f_w_f32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2( @@ -850,7 +850,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m2(src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4( @@ -859,7 +859,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { - return vfncvt_f_f_w_f32m4(src, vl); + return __riscv_vfncvt_f_f_w_f32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4( @@ -868,7 +868,7 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m4(src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_m( @@ -877,7 +877,7 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return vfncvt_x_f_w_i8mf8_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m( @@ -886,7 +886,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, 
vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf8_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_m( @@ -895,7 +895,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vfncvt_x_f_w_i8mf4_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m( @@ -904,7 +904,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf4_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_m( @@ -913,7 +913,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vfncvt_x_f_w_i8mf2_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m( @@ -922,7 +922,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf2_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_m( @@ -931,7 +931,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vfncvt_x_f_w_i8m1_m(mask, src, vl); + return 
__riscv_vfncvt_x_f_w_i8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m( @@ -940,7 +940,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m1_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_m( @@ -949,7 +949,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vfncvt_x_f_w_i8m2_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m( @@ -958,7 +958,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m2_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_m( @@ -967,7 +967,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return vfncvt_x_f_w_i8m4_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m( @@ -976,7 +976,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m4_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_m( @@ -985,7 +985,7 @@ 
vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf8_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_m( @@ -994,7 +994,7 @@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf8_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_m( @@ -1003,7 +1003,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf4_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m( @@ -1012,7 +1012,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf4_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_m( @@ -1021,7 +1021,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf2_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m( @@ -1030,7 +1030,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, 
vfloat16m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf2_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_m( @@ -1039,7 +1039,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vfncvt_xu_f_w_u8m1_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m( @@ -1048,7 +1048,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m1_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_m( @@ -1057,7 +1057,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vfncvt_xu_f_w_u8m2_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m( @@ -1066,7 +1066,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m2_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_m( @@ -1075,7 +1075,7 @@ vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return vfncvt_xu_f_w_u8m4_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m( @@ -1084,7 +1084,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m4_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m( @@ -1093,7 +1093,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfncvt_x_f_w_i16mf4_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m( @@ -1102,7 +1102,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf4_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m( @@ -1111,7 +1111,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vfncvt_x_f_w_i16mf2_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( @@ -1120,7 +1120,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { 
- return vfncvt_rtz_x_f_w_i16mf2_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m( @@ -1129,7 +1129,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfncvt_x_f_w_i16m1_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( @@ -1138,7 +1138,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m1_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m( @@ -1147,7 +1147,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfncvt_x_f_w_i16m2_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( @@ -1156,7 +1156,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m2_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m( @@ -1165,7 +1165,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return vfncvt_x_f_w_i16m4_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i16m4_m(mask, 
src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m( @@ -1174,7 +1174,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m4_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m( @@ -1183,7 +1183,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf4_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( @@ -1192,7 +1192,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf4_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m( @@ -1201,7 +1201,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf2_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( @@ -1210,7 +1210,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf2_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: 
@test_vfncvt_xu_f_w_u16m1_m( @@ -1219,7 +1219,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfncvt_xu_f_w_u16m1_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m( @@ -1228,7 +1228,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m1_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m( @@ -1237,7 +1237,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfncvt_xu_f_w_u16m2_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m( @@ -1246,7 +1246,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m2_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m( @@ -1255,7 +1255,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return vfncvt_xu_f_w_u16m4_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m( @@ -1264,7 +1264,7 @@ vuint16m4_t 
test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m4_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_m( @@ -1273,7 +1273,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl) { - return vfncvt_f_x_w_f16mf4_m(mask, src, vl); + return __riscv_vfncvt_f_x_w_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_m( @@ -1282,7 +1282,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vint32mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return vfncvt_f_x_w_f16mf2_m(mask, src, vl); + return __riscv_vfncvt_f_x_w_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_m( @@ -1291,7 +1291,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vint32m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return vfncvt_f_x_w_f16m1_m(mask, src, vl); + return __riscv_vfncvt_f_x_w_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_m( @@ -1300,7 +1300,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vint32m2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return vfncvt_f_x_w_f16m2_m(mask, src, vl); + return __riscv_vfncvt_f_x_w_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_m( @@ -1309,7 +1309,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) { - return vfncvt_f_x_w_f16m4_m(mask, src, vl); + return __riscv_vfncvt_f_x_w_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_m( @@ -1318,7 +1318,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf4_m(mask, src, vl); + return __riscv_vfncvt_f_xu_w_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_m( @@ -1327,7 +1327,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vuint32mf2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf2_m(mask, src, vl); + return __riscv_vfncvt_f_xu_w_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_m( @@ -1336,7 +1336,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, vuint32m1_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return vfncvt_f_xu_w_f16m1_m(mask, src, vl); + return __riscv_vfncvt_f_xu_w_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_m( @@ -1345,7 +1345,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vuint32m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return vfncvt_f_xu_w_f16m2_m(mask, src, vl); + return __riscv_vfncvt_f_xu_w_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_m( @@ -1354,7 +1354,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vuint32m4_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) { - return 
vfncvt_f_xu_w_f16m4_m(mask, src, vl); + return __riscv_vfncvt_f_xu_w_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_m( @@ -1363,7 +1363,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vuint32m8_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfncvt_f_f_w_f16mf4_m(mask, src, vl); + return __riscv_vfncvt_f_f_w_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m( @@ -1372,7 +1372,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf4_m(mask, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_m( @@ -1381,7 +1381,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vfncvt_f_f_w_f16mf2_m(mask, src, vl); + return __riscv_vfncvt_f_f_w_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m( @@ -1390,7 +1390,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf2_m(mask, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_m( @@ -1399,7 +1399,7 @@ vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfncvt_f_f_w_f16m1_m(mask, src, vl); + return 
__riscv_vfncvt_f_f_w_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m( @@ -1408,7 +1408,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m1_m(mask, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_m( @@ -1417,7 +1417,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfncvt_f_f_w_f16m2_m(mask, src, vl); + return __riscv_vfncvt_f_f_w_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m( @@ -1426,7 +1426,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m2_m(mask, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_m( @@ -1435,7 +1435,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return vfncvt_f_f_w_f16m4_m(mask, src, vl); + return __riscv_vfncvt_f_f_w_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m( @@ -1444,7 +1444,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m4_m(mask, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: 
@test_vfncvt_x_f_w_i32mf2_m( @@ -1453,7 +1453,7 @@ vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return vfncvt_x_f_w_i32mf2_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m( @@ -1462,7 +1462,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32mf2_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m( @@ -1471,7 +1471,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return vfncvt_x_f_w_i32m1_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m( @@ -1480,7 +1480,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m1_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m( @@ -1489,7 +1489,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return vfncvt_x_f_w_i32m2_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m( @@ -1498,7 +1498,7 @@ vint32m2_t 
test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m2_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m( @@ -1507,7 +1507,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return vfncvt_x_f_w_i32m4_m(mask, src, vl); + return __riscv_vfncvt_x_f_w_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m( @@ -1516,7 +1516,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m4_m(mask, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m( @@ -1525,7 +1525,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return vfncvt_xu_f_w_u32mf2_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( @@ -1534,7 +1534,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32mf2_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m( @@ -1543,7 +1543,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t 
src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return vfncvt_xu_f_w_u32m1_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( @@ -1552,7 +1552,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m1_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m( @@ -1561,7 +1561,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return vfncvt_xu_f_w_u32m2_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( @@ -1570,7 +1570,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m2_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m( @@ -1579,7 +1579,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return vfncvt_xu_f_w_u32m4_m(mask, src, vl); + return __riscv_vfncvt_xu_f_w_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( @@ -1588,7 +1588,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m4_m(mask, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m( @@ -1597,7 +1597,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) { - return vfncvt_f_x_w_f32mf2_m(mask, src, vl); + return __riscv_vfncvt_f_x_w_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m( @@ -1606,7 +1606,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vint64m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) { - return vfncvt_f_x_w_f32m1_m(mask, src, vl); + return __riscv_vfncvt_f_x_w_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m( @@ -1615,7 +1615,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vint64m2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) { - return vfncvt_f_x_w_f32m2_m(mask, src, vl); + return __riscv_vfncvt_f_x_w_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m( @@ -1624,7 +1624,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vint64m4_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) { - return vfncvt_f_x_w_f32m4_m(mask, src, vl); + return __riscv_vfncvt_f_x_w_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m( @@ -1633,7 +1633,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vuint64m1_t src, size_t vl) { - return 
vfncvt_f_xu_w_f32mf2_m(mask, src, vl); + return __riscv_vfncvt_f_xu_w_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m( @@ -1642,7 +1642,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vuint64m1_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) { - return vfncvt_f_xu_w_f32m1_m(mask, src, vl); + return __riscv_vfncvt_f_xu_w_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m( @@ -1651,7 +1651,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vuint64m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) { - return vfncvt_f_xu_w_f32m2_m(mask, src, vl); + return __riscv_vfncvt_f_xu_w_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m( @@ -1660,7 +1660,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vuint64m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) { - return vfncvt_f_xu_w_f32m4_m(mask, src, vl); + return __riscv_vfncvt_f_xu_w_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m( @@ -1669,7 +1669,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vuint64m8_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return vfncvt_f_f_w_f32mf2_m(mask, src, vl); + return __riscv_vfncvt_f_f_w_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m( @@ -1678,7 +1678,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32mf2_m(mask, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32mf2_m(mask, 
src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m( @@ -1687,7 +1687,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return vfncvt_f_f_w_f32m1_m(mask, src, vl); + return __riscv_vfncvt_f_f_w_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m( @@ -1696,7 +1696,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m1_m(mask, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m( @@ -1705,7 +1705,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return vfncvt_f_f_w_f32m2_m(mask, src, vl); + return __riscv_vfncvt_f_f_w_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m( @@ -1714,7 +1714,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m2_m(mask, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m( @@ -1723,7 +1723,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return vfncvt_f_f_w_f32m4_m(mask, src, vl); + return __riscv_vfncvt_f_f_w_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m( @@ -1732,6 +1732,6 @@ 
vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m4_m(mask, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m4_m(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfneg.c index ca6dd926f96a..e777cfa8e8c4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfneg.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfneg_v_f16mf4(vfloat16mf4_t op1, size_t vl) { - return vfneg_v_f16mf4(op1, vl); + return __riscv_vfneg_v_f16mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4(vfloat16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfneg_v_f16mf2(vfloat16mf2_t op1, size_t vl) { - return vfneg_v_f16mf2(op1, vl); + return __riscv_vfneg_v_f16mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2(vfloat16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfneg_v_f16m1(vfloat16m1_t op1, size_t vl) { - return vfneg_v_f16m1(op1, vl); + return __riscv_vfneg_v_f16m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfneg_v_f16m1(vfloat16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfneg_v_f16m2(vfloat16m2_t op1, size_t vl) { - return vfneg_v_f16m2(op1, vl); + return __riscv_vfneg_v_f16m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfneg_v_f16m2(vfloat16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m4_t test_vfneg_v_f16m4(vfloat16m4_t op1, size_t vl) { - return vfneg_v_f16m4(op1, vl); + return __riscv_vfneg_v_f16m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfneg_v_f16m4(vfloat16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfneg_v_f16m8(vfloat16m8_t op1, size_t vl) { - return vfneg_v_f16m8(op1, vl); + return __riscv_vfneg_v_f16m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfneg_v_f16m8(vfloat16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfneg_v_f32mf2(vfloat32mf2_t op1, size_t vl) { - return vfneg_v_f32mf2(op1, vl); + return __riscv_vfneg_v_f32mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2(vfloat32mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfneg_v_f32m1(vfloat32m1_t op1, size_t vl) { - return vfneg_v_f32m1(op1, vl); + return __riscv_vfneg_v_f32m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfneg_v_f32m1(vfloat32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfneg_v_f32m2(vfloat32m2_t op1, size_t vl) { - return vfneg_v_f32m2(op1, vl); + return __riscv_vfneg_v_f32m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfneg_v_f32m2(vfloat32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfneg_v_f32m4(vfloat32m4_t op1, size_t vl) { - return vfneg_v_f32m4(op1, vl); + return __riscv_vfneg_v_f32m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfneg_v_f32m4(vfloat32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfneg_v_f32m8(vfloat32m8_t op1, size_t vl) { - return vfneg_v_f32m8(op1, vl); + return __riscv_vfneg_v_f32m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m1( @@ -112,7 +112,7 @@ 
vfloat32m8_t test_vfneg_v_f32m8(vfloat32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfneg_v_f64m1(vfloat64m1_t op1, size_t vl) { - return vfneg_v_f64m1(op1, vl); + return __riscv_vfneg_v_f64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfneg_v_f64m1(vfloat64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfneg_v_f64m2(vfloat64m2_t op1, size_t vl) { - return vfneg_v_f64m2(op1, vl); + return __riscv_vfneg_v_f64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfneg_v_f64m2(vfloat64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfneg_v_f64m4(vfloat64m4_t op1, size_t vl) { - return vfneg_v_f64m4(op1, vl); + return __riscv_vfneg_v_f64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfneg_v_f64m4(vfloat64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfneg_v_f64m8(vfloat64m8_t op1, size_t vl) { - return vfneg_v_f64m8(op1, vl); + return __riscv_vfneg_v_f64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_m( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfneg_v_f64m8(vfloat64m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return vfneg_v_f16mf4_m(mask, op1, vl); + return __riscv_vfneg_v_f16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_m( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return vfneg_v_f16mf2_m(mask, op1, vl); + return __riscv_vfneg_v_f16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m1_m( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return vfneg_v_f16m1_m(mask, op1, vl); + return __riscv_vfneg_v_f16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m2_m( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return vfneg_v_f16m2_m(mask, op1, vl); + return __riscv_vfneg_v_f16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m4_m( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return vfneg_v_f16m4_m(mask, op1, vl); + return __riscv_vfneg_v_f16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m8_m( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return vfneg_v_f16m8_m(mask, op1, vl); + return __riscv_vfneg_v_f16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return vfneg_v_f32mf2_m(mask, op1, vl); + return __riscv_vfneg_v_f32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return vfneg_v_f32m1_m(mask, op1, vl); + return __riscv_vfneg_v_f32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: 
@test_vfneg_v_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return vfneg_v_f32m2_m(mask, op1, vl); + return __riscv_vfneg_v_f32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return vfneg_v_f32m4_m(mask, op1, vl); + return __riscv_vfneg_v_f32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return vfneg_v_f32m8_m(mask, op1, vl); + return __riscv_vfneg_v_f32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return vfneg_v_f64m1_m(mask, op1, vl); + return __riscv_vfneg_v_f64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return vfneg_v_f64m2_m(mask, op1, vl); + return __riscv_vfneg_v_f64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, 
size_t vl) { - return vfneg_v_f64m4_m(mask, op1, vl); + return __riscv_vfneg_v_f64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m( @@ -274,6 +274,6 @@ vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return vfneg_v_f64m8_m(mask, op1, vl); + return __riscv_vfneg_v_f64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c index aabbe0ccae87..d3c02a0adb94 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmacc.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmacc_vv_f16mf4(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmacc_vf_f16mf4(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmacc_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmacc_vv_f16mf2(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t 
test_vfnmacc_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmacc_vf_f16mf2(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmacc_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmacc_vv_f16m1(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmacc_vf_f16m1(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmacc_vv_f16m2(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmacc_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmacc_vf_f16m2(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat16m4_t test_vfnmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmacc_vv_f16m4(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmacc_vf_f16m4(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmacc_vv_f16m8(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmacc_vf_f16m8(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmacc_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmacc_vv_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t 
vs2, size_t vl) { - return vfnmacc_vf_f32mf2(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmacc_vv_f32m1(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmacc_vf_f32m1(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmacc_vv_f32m2(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmacc_vf_f32m2(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmacc_vv_f32m4(vd, vs1, vs2, vl); + return 
__riscv_vfnmacc_vv_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmacc_vf_f32m4(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmacc_vv_f32m8(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmacc_vf_f32m8(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmacc_vv_f64m1(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmacc_vf_f64m1(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2( @@ -229,7 
+229,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmacc_vv_f64m2(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmacc_vf_f64m2(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmacc_vv_f64m4(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmacc_vf_f64m4(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmacc_vv_f64m8(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmacc_vf_f64m8(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmacc_vv_f16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmacc_vf_f16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmacc_vv_f16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmacc_vf_f16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_m( @@ -319,7 +319,7 @@ 
vfloat16mf2_t test_vfnmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmacc_vv_f16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmacc_vf_f16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmacc_vv_f16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmacc_vf_f16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmacc_vv_f16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m4_m(mask, vd, 
vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmacc_vf_f16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmacc_vv_f16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmacc_vf_f16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return 
vfnmacc_vf_f32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmacc_vf_f32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmacc_vf_f32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t 
test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmacc_vf_f32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmacc_vf_f32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmacc_vv_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, 
vfloat64m1_t vd, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmacc_vf_f64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmacc_vv_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmacc_vf_f64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmacc_vv_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmacc_vf_f64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfnmacc_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmacc_vf_f64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd.c index c2cf632313db..563abe3ac4ad 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmadd.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmadd_vv_f16mf4(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmadd_vf_f16mf4(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4(vfloat16mf4_t 
vd, _Float16 rs1, vfloat16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmadd_vv_f16mf2(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmadd_vf_f16mf2(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmadd_vv_f16m1(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmadd_vf_f16m1(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmadd_vv_f16m2(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t 
test_vfnmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmadd_vf_f16m2(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmadd_vv_f16m4(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmadd_vf_f16m4(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmadd_vv_f16m8(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmadd_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmadd_vf_f16m8(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmadd_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - 
return vfnmadd_vv_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmadd_vf_f32mf2(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmadd_vv_f32m1(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmadd_vf_f32m1(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmadd_vv_f32m2(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmadd_vf_f32m2(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m2(vd, rs1, vs2, vl); } 
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmadd_vv_f32m4(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmadd_vf_f32m4(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmadd_vv_f32m8(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmadd_vf_f32m8(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmadd_vv_f64m1(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t 
test_vfnmadd_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmadd_vf_f64m1(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmadd_vv_f64m2(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmadd_vf_f64m2(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmadd_vv_f64m4(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmadd_vf_f64m4(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmadd_vv_f64m8(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmadd_vf_f64m8(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmadd_vv_f16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmadd_vf_f16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmadd_vv_f16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, 
vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmadd_vf_f16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmadd_vv_f16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmadd_vf_f16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmadd_vv_f16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmadd_vf_f16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_m( @@ 
-355,7 +355,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmadd_vv_f16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmadd_vf_f16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmadd_vv_f16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmadd_vf_f16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmadd_vv_f32mf2_m(mask, vd, vs1, vs2, vl); + return 
__riscv_vfnmadd_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmadd_vf_f32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmadd_vv_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmadd_vf_f32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmadd_vv_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, 
vfloat32m2_t vs2, size_t vl) { - return vfnmadd_vf_f32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmadd_vv_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmadd_vf_f32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmadd_vv_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmadd_vf_f32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmadd_vv_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmadd_vf_f64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmadd_vv_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmadd_vf_f64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmadd_vv_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t 
mask, vfloat64m4_t vd, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmadd_vf_f64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmadd_vv_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmadd_vf_f64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac.c index 6beaaa888613..55601d7884fe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsac.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsac_vv_f16mf4(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16mf4_t test_vfnmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsac_vf_f16mf4(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsac_vv_f16mf2(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsac_vf_f16mf2(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsac_vv_f16m1(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsac_vf_f16m1(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t 
vs2, size_t vl) { - return vfnmsac_vv_f16m2(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsac_vf_f16m2(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsac_vv_f16m4(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsac_vf_f16m4(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsac_vv_f16m8(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsac_vf_f16m8(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m8(vd, 
rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmsac_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsac_vv_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsac_vf_f32mf2(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsac_vv_f32m1(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsac_vf_f32m1(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsac_vv_f32m2(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2( @@ -166,7 +166,7 @@ 
vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsac_vf_f32m2(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsac_vv_f32m4(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsac_vf_f32m4(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsac_vv_f32m8(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsac_vf_f32m8(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsac_vv_f64m1(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsac_vf_f64m1(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsac_vv_f64m2(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsac_vf_f64m2(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsac_vv_f64m4(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, 
size_t vl) { - return vfnmsac_vf_f64m4(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsac_vv_f64m8(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsac_vf_f64m8(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsac_vv_f16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsac_vf_f16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, 
vfloat16mf2_t vs2, size_t vl) { - return vfnmsac_vv_f16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsac_vf_f16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsac_vv_f16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsac_vf_f16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsac_vv_f16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_ // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsac_vf_f16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsac_vv_f16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsac_vf_f16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsac_vv_f16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsac_vf_f16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t 
test_vfnmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsac_vf_f32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsac_vf_f32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m2_m(mask, vd, 
vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsac_vf_f32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsac_vf_f32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return 
vfnmsac_vf_f32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsac_vv_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsac_vf_f64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsac_vv_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsac_vf_f64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t 
test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsac_vv_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsac_vf_f64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsac_vf_f64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub.c index 23ea41ea6086..5498000e4a2c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfnmsub.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4(vfloat16mf4_t 
vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsub_vv_f16mf4(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsub_vf_f16mf4(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsub_vv_f16mf2(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsub_vf_f16mf2(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsub_vv_f16m1(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return 
vfnmsub_vf_f16m1(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsub_vv_f16m2(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsub_vf_f16m2(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsub_vv_f16m4(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsub_vf_f16m4(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsub_vv_f16m8(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m8(vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsub_vf_f16m8(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmsub_vf_f16m8(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsub_vv_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsub_vf_f32mf2(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t vd, float rs1, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsub_vv_f32m1(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsub_vf_f32m1(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t 
test_vfnmsub_vf_f32m1(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsub_vv_f32m2(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsub_vf_f32m2(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsub_vv_f32m4(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsub_vf_f32m4(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsub_vv_f32m8(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsub_vf_f32m8(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsub_vv_f64m1(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsub_vf_f64m1(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsub_vv_f64m2(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsub_vf_f64m2(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, 
size_t vl) { - return vfnmsub_vv_f64m4(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsub_vf_f64m4(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsub_vv_f64m8(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsub_vf_f64m8(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsub_vv_f16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return 
vfnmsub_vf_f16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsub_vv_f16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsub_vf_f16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsub_vv_f16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsub_vf_f16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t 
test_vfnmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsub_vv_f16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsub_vf_f16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsub_vv_f16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsub_vf_f16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsub_vv_f16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8_m(vbool2_t 
mask, vfloat16m8_t vd, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsub_vf_f16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsub_vv_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsub_vf_f32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsub_vv_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsub_vf_f32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfnmsub_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsub_vv_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsub_vf_f32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsub_vv_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsub_vf_f32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsub_vv_f32m8_m(mask, vd, vs1, vs2, vl); + 
return __riscv_vfnmsub_vv_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsub_vf_f32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsub_vv_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsub_vf_f64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsub_vv_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1, 
vfloat64m2_t vs2, size_t vl) { - return vfnmsub_vf_f64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsub_vv_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsub_vf_f64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsub_vv_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsub_vf_f64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrdiv.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrdiv.c index 612dcc5c6e7e..6763004c9b8b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrdiv.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf4(op1, op2, vl); + return __riscv_vfrdiv_vf_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf2(op1, op2, vl); + return __riscv_vfrdiv_vf_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m1(op1, op2, vl); + return __riscv_vfrdiv_vf_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m2(op1, op2, vl); + return __riscv_vfrdiv_vf_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m4(op1, op2, vl); + return __riscv_vfrdiv_vf_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8( @@ -58,7 +58,7 
@@ vfloat16m4_t test_vfrdiv_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrdiv_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m8(op1, op2, vl); + return __riscv_vfrdiv_vf_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfrdiv_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32mf2(op1, op2, vl); + return __riscv_vfrdiv_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m1(op1, op2, vl); + return __riscv_vfrdiv_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m2(op1, op2, vl); + return __riscv_vfrdiv_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m4(op1, op2, vl); + return __riscv_vfrdiv_vf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m8(op1, op2, vl); + return __riscv_vfrdiv_vf_f32m8(op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m1(op1, op2, vl); + return __riscv_vfrdiv_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m2(op1, op2, vl); + return __riscv_vfrdiv_vf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m4(op1, op2, vl); + return __riscv_vfrdiv_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m8(op1, op2, vl); + return __riscv_vfrdiv_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_m( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_m( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t 
mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_m( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m1_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_m( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m2_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_m( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m4_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_m( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m8_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return 
vfrdiv_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m1_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m( @@ -274,6 +274,6 @@ vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7.c index ab53165b6a85..2f6f66ec31cd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrec7.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) { - return vfrec7_v_f16mf4(op1, vl); + return __riscv_vfrec7_v_f16mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) { - return vfrec7_v_f16mf2(op1, vl); + return __riscv_vfrec7_v_f16mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) { - return vfrec7_v_f16m1(op1, vl); + return __riscv_vfrec7_v_f16m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) { - return vfrec7_v_f16m2(op1, vl); + return __riscv_vfrec7_v_f16m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) { - return vfrec7_v_f16m4(op1, vl); + return __riscv_vfrec7_v_f16m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t op1, size_t vl) { - return vfrec7_v_f16m8(op1, vl); + return __riscv_vfrec7_v_f16m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { - return vfrec7_v_f32mf2(op1, vl); + return __riscv_vfrec7_v_f32mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) { - return vfrec7_v_f32m1(op1, vl); + return __riscv_vfrec7_v_f32m1(op1, vl); } // CHECK-RV64-LABEL: 
@test_vfrec7_v_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) { - return vfrec7_v_f32m2(op1, vl); + return __riscv_vfrec7_v_f32m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) { - return vfrec7_v_f32m4(op1, vl); + return __riscv_vfrec7_v_f32m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) { - return vfrec7_v_f32m8(op1, vl); + return __riscv_vfrec7_v_f32m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) { - return vfrec7_v_f64m1(op1, vl); + return __riscv_vfrec7_v_f64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) { - return vfrec7_v_f64m2(op1, vl); + return __riscv_vfrec7_v_f64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) { - return vfrec7_v_f64m4(op1, vl); + return __riscv_vfrec7_v_f64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) { - return vfrec7_v_f64m8(op1, vl); + return __riscv_vfrec7_v_f64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_m( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return vfrec7_v_f16mf4_m(mask, op1, vl); + return __riscv_vfrec7_v_f16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_m( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return vfrec7_v_f16mf2_m(mask, op1, vl); + return __riscv_vfrec7_v_f16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_m( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return vfrec7_v_f16m1_m(mask, op1, vl); + return __riscv_vfrec7_v_f16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_m( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return vfrec7_v_f16m2_m(mask, op1, vl); + return __riscv_vfrec7_v_f16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_m( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return vfrec7_v_f16m4_m(mask, op1, vl); + return __riscv_vfrec7_v_f16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_m( @@ -193,7 +193,7 @@ vfloat16m4_t 
test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return vfrec7_v_f16m8_m(mask, op1, vl); + return __riscv_vfrec7_v_f16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return vfrec7_v_f32mf2_m(mask, op1, vl); + return __riscv_vfrec7_v_f32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return vfrec7_v_f32m1_m(mask, op1, vl); + return __riscv_vfrec7_v_f32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return vfrec7_v_f32m2_m(mask, op1, vl); + return __riscv_vfrec7_v_f32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return vfrec7_v_f32m4_m(mask, op1, vl); + return __riscv_vfrec7_v_f32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return 
vfrec7_v_f32m8_m(mask, op1, vl); + return __riscv_vfrec7_v_f32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return vfrec7_v_f64m1_m(mask, op1, vl); + return __riscv_vfrec7_v_f64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return vfrec7_v_f64m2_m(mask, op1, vl); + return __riscv_vfrec7_v_f64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return vfrec7_v_f64m4_m(mask, op1, vl); + return __riscv_vfrec7_v_f64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m( @@ -274,6 +274,6 @@ vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return vfrec7_v_f64m8_m(mask, op1, vl); + return __riscv_vfrec7_v_f64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c index 749293f86a0d..152a46abbb9e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf4_f16m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f16mf4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1( @@ -22,7 +22,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf2_f16m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f16mf2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1( @@ -31,7 +31,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m1_f16m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f16m1_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m2_f16m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f16m2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1( @@ -49,7 +49,7 @@ vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m4_f16m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f16m4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scal // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m8_f16m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f16m8_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32mf2_f32m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f32mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1( @@ -76,7 +76,7 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m1_f32m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f32m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m2_f32m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f32m2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1( @@ -94,7 +94,7 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m4_f32m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f32m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1( @@ -103,7 +103,7 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vector, 
vfloat32m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m8_f32m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f32m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1( @@ -112,7 +112,7 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m1_f64m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f64m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m2_f64m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f64m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1( @@ -130,7 +130,7 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m4_f64m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f64m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1( @@ -139,7 +139,7 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m8_f64m1(vector, scalar, vl); + return __riscv_vfredmax_vs_f64m8_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_m( @@ -148,7 +148,7 @@ vfloat64m1_t 
test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf4_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f16mf4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_m( @@ -157,7 +157,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf2_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f16mf2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_m( @@ -166,7 +166,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m1_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f16m1_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_m( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m2_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f16m2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_m( @@ -184,7 +184,7 @@ vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return 
vfredmax_vs_f16m4_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f16m4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_m( @@ -193,7 +193,7 @@ vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m8_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f16m8_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m( @@ -202,7 +202,7 @@ vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32mf2_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f32mf2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m1_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f32m1_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m2_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f32m2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m( @@ -229,7 +229,7 @@ vfloat32m1_t 
test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m4_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f32m4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m( @@ -238,7 +238,7 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m8_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f32m8_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m( @@ -247,7 +247,7 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m1_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f64m1_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m2_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f64m2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m( @@ -265,7 +265,7 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return 
vfredmax_vs_f64m4_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f64m4_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m( @@ -274,6 +274,6 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m8_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredmax_vs_f64m8_f64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c index 589dc721e7f3..e22d0070dd49 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf4_f16m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f16mf4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1( @@ -22,7 +22,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf2_f16m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f16mf2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1( @@ -31,7 +31,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m1_f16m1(vector, 
scalar, vl); + return __riscv_vfredmin_vs_f16m1_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m2_f16m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f16m2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1( @@ -49,7 +49,7 @@ vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m4_f16m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f16m4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m8_f16m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f16m8_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32mf2_f32m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f32mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1( @@ -76,7 +76,7 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return 
vfredmin_vs_f32m1_f32m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f32m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m2_f32m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f32m2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1( @@ -94,7 +94,7 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m4_f32m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f32m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1( @@ -103,7 +103,7 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m8_f32m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f32m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1( @@ -112,7 +112,7 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m1_f64m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f64m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, 
size_t vl) { - return vfredmin_vs_f64m2_f64m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f64m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1( @@ -130,7 +130,7 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m4_f64m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f64m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1( @@ -139,7 +139,7 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m8_f64m1(vector, scalar, vl); + return __riscv_vfredmin_vs_f64m8_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_m( @@ -148,7 +148,7 @@ vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf4_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f16mf4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_m( @@ -157,7 +157,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf2_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f16mf2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_m( @@ -166,7 +166,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vecto // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m1_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f16m1_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_m( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m2_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f16m2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_m( @@ -184,7 +184,7 @@ vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m4_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f16m4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_m( @@ -193,7 +193,7 @@ vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m8_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f16m8_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m( @@ -202,7 +202,7 @@ vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32mf2_f32m1_m(mask, vector, scalar, vl); + return 
__riscv_vfredmin_vs_f32mf2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m1_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f32m1_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m2_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f32m2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m( @@ -229,7 +229,7 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m4_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f32m4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m( @@ -238,7 +238,7 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m8_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f32m8_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m( @@ -247,7 +247,7 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m1_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f64m1_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m2_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f64m2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m( @@ -265,7 +265,7 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m4_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f64m4_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m( @@ -274,6 +274,6 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m8_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredmin_vs_f64m8_f64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c index 5203bb3453b3..00390a6f7b0c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf4_f16m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f16mf4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1( @@ -22,7 +22,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf2_f16m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f16mf2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1( @@ -31,7 +31,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m1_f16m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f16m1_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m2_f16m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f16m2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1( @@ -49,7 +49,7 @@ vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m4_f16m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f16m4_f16m1(vector, scalar, vl); } 
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m8_f16m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f16m8_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32mf2_f32m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f32mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1( @@ -76,7 +76,7 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m1_f32m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f32m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m2_f32m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f32m2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1( @@ -94,7 +94,7 @@ vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m4_f32m1(vector, scalar, vl); + return 
__riscv_vfredosum_vs_f32m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1( @@ -103,7 +103,7 @@ vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m8_f32m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f32m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1( @@ -112,7 +112,7 @@ vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m1_f64m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f64m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m2_f64m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f64m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1( @@ -130,7 +130,7 @@ vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m4_f64m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f64m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1( @@ -139,7 +139,7 @@ vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return 
vfredosum_vs_f64m8_f64m1(vector, scalar, vl); + return __riscv_vfredosum_vs_f64m8_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_m( @@ -148,7 +148,7 @@ vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf4_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f16mf4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_m( @@ -157,7 +157,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf2_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f16mf2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_m( @@ -166,7 +166,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m1_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f16m1_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_m( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m2_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f16m2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_m( @@ -184,7 +184,7 @@ vfloat16m1_t 
test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m4_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f16m4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_m( @@ -193,7 +193,7 @@ vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m8_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f16m8_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( @@ -202,7 +202,7 @@ vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32mf2_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f32mf2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m1_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f32m1_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { 
- return vfredosum_vs_f32m2_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f32m2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( @@ -229,7 +229,7 @@ vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m4_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f32m4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( @@ -238,7 +238,7 @@ vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m8_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f32m8_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( @@ -247,7 +247,7 @@ vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m1_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f64m1_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m2_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f64m2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( @@ -265,7 +265,7 @@ vfloat64m1_t 
test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m4_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f64m4_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( @@ -274,6 +274,6 @@ vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m8_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredosum_vs_f64m8_f64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c index 7a9a98c3c03a..ce43f60caf5c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf4_f16m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f16mf4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1( @@ -22,7 +22,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf2_f16m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f16mf2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1( @@ -31,7 +31,7 @@ 
vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m1_f16m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f16m1_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m2_f16m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f16m2_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1( @@ -49,7 +49,7 @@ vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m4_f16m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f16m4_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m8_f16m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f16m8_f16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32mf2_f32m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f32mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vfredusum_vs_f32m1_f32m1( @@ -76,7 +76,7 @@ vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m1_f32m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f32m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m2_f32m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f32m2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1( @@ -94,7 +94,7 @@ vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m4_f32m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f32m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1( @@ -103,7 +103,7 @@ vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m8_f32m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f32m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1( @@ -112,7 +112,7 @@ vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m1_f64m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f64m1_f64m1(vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m2_f64m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f64m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1( @@ -130,7 +130,7 @@ vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m4_f64m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f64m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1( @@ -139,7 +139,7 @@ vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m8_f64m1(vector, scalar, vl); + return __riscv_vfredusum_vs_f64m8_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_m( @@ -148,7 +148,7 @@ vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t sca // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf4_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f16mf4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_m( @@ -157,7 +157,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { 
- return vfredusum_vs_f16mf2_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f16mf2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_m( @@ -166,7 +166,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m1_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f16m1_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_m( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m2_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f16m2_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_m( @@ -184,7 +184,7 @@ vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m4_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f16m4_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_m( @@ -193,7 +193,7 @@ vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m8_f16m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f16m8_f16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m( @@ -202,7 +202,7 @@ vfloat16m1_t 
test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32mf2_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f32mf2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m1_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f32m1_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m2_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f32m2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m( @@ -229,7 +229,7 @@ vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m4_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f32m4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m( @@ -238,7 +238,7 @@ vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - 
return vfredusum_vs_f32m8_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f32m8_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m( @@ -247,7 +247,7 @@ vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m1_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f64m1_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m2_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f64m2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m( @@ -265,7 +265,7 @@ vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m4_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f64m4_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m( @@ -274,6 +274,6 @@ vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m8_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfredusum_vs_f64m8_f64m1_m(mask, vector, scalar, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7.c index c703edbd61b6..d333159528f6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsqrt7.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsqrt7_v_f16mf4(vfloat16mf4_t op1, size_t vl) { - return vfrsqrt7_v_f16mf4(op1, vl); + return __riscv_vfrsqrt7_v_f16mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4(vfloat16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsqrt7_v_f16mf2(vfloat16mf2_t op1, size_t vl) { - return vfrsqrt7_v_f16mf2(op1, vl); + return __riscv_vfrsqrt7_v_f16mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2(vfloat16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsqrt7_v_f16m1(vfloat16m1_t op1, size_t vl) { - return vfrsqrt7_v_f16m1(op1, vl); + return __riscv_vfrsqrt7_v_f16m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1(vfloat16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsqrt7_v_f16m2(vfloat16m2_t op1, size_t vl) { - return vfrsqrt7_v_f16m2(op1, vl); + return __riscv_vfrsqrt7_v_f16m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2(vfloat16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsqrt7_v_f16m4(vfloat16m4_t op1, size_t vl) { - return vfrsqrt7_v_f16m4(op1, vl); + return __riscv_vfrsqrt7_v_f16m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t 
test_vfrsqrt7_v_f16m4(vfloat16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsqrt7_v_f16m8(vfloat16m8_t op1, size_t vl) { - return vfrsqrt7_v_f16m8(op1, vl); + return __riscv_vfrsqrt7_v_f16m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfrsqrt7_v_f16m8(vfloat16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { - return vfrsqrt7_v_f32mf2(op1, vl); + return __riscv_vfrsqrt7_v_f32mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) { - return vfrsqrt7_v_f32m1(op1, vl); + return __riscv_vfrsqrt7_v_f32m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) { - return vfrsqrt7_v_f32m2(op1, vl); + return __riscv_vfrsqrt7_v_f32m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) { - return vfrsqrt7_v_f32m4(op1, vl); + return __riscv_vfrsqrt7_v_f32m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) { - return vfrsqrt7_v_f32m8(op1, vl); + return __riscv_vfrsqrt7_v_f32m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) { - return vfrsqrt7_v_f64m1(op1, vl); + return __riscv_vfrsqrt7_v_f64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) { - return vfrsqrt7_v_f64m2(op1, vl); + return __riscv_vfrsqrt7_v_f64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) { - return vfrsqrt7_v_f64m4(op1, vl); + return __riscv_vfrsqrt7_v_f64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) { - return vfrsqrt7_v_f64m8(op1, vl); + return __riscv_vfrsqrt7_v_f64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_m( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return vfrsqrt7_v_f16mf4_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_m( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return vfrsqrt7_v_f16mf2_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_m( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return vfrsqrt7_v_f16m1_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_m( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return vfrsqrt7_v_f16m2_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_m( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return vfrsqrt7_v_f16m4_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_m( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return vfrsqrt7_v_f16m8_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return vfrsqrt7_v_f32mf2_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return vfrsqrt7_v_f32m1_m(mask, op1, vl); + return 
__riscv_vfrsqrt7_v_f32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return vfrsqrt7_v_f32m2_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return vfrsqrt7_v_f32m4_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return vfrsqrt7_v_f32m8_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return vfrsqrt7_v_f64m1_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return vfrsqrt7_v_f64m2_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, 
vfloat64m2_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return vfrsqrt7_v_f64m4_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m( @@ -274,6 +274,6 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return vfrsqrt7_v_f64m8_m(mask, op1, vl); + return __riscv_vfrsqrt7_v_f64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub.c index e6ac0f997a5d..eedb3e79e863 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfrsub.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf4(op1, op2, vl); + return __riscv_vfrsub_vf_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf2(op1, op2, vl); + return __riscv_vfrsub_vf_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m1(op1, op2, vl); + return __riscv_vfrsub_vf_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfrsub_vf_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m2(op1, op2, vl); + return __riscv_vfrsub_vf_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m4(op1, op2, vl); + return __riscv_vfrsub_vf_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m8(op1, op2, vl); + return __riscv_vfrsub_vf_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfrsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32mf2(op1, op2, vl); + return __riscv_vfrsub_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m1(op1, op2, vl); + return __riscv_vfrsub_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m2(op1, op2, vl); 
+ return __riscv_vfrsub_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m4(op1, op2, vl); + return __riscv_vfrsub_vf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m8(op1, op2, vl); + return __riscv_vfrsub_vf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m1(op1, op2, vl); + return __riscv_vfrsub_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m2(op1, op2, vl); + return __riscv_vfrsub_vf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m4(op1, op2, vl); + return __riscv_vfrsub_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double 
op2, size_t vl) { - return vfrsub_vf_f64m8(op1, op2, vl); + return __riscv_vfrsub_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_m( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_m( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_m( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m1_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_m( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m2_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_m( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m4_m(mask, op1, op2, vl); + return 
__riscv_vfrsub_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_m( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m8_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfrsub_vf_f32m8_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m( @@ -274,6 +274,6 @@ vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfrsub_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj.c index 998e812d119a..48f6afae9b8d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnj.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnj_vv_f16mf4(op1, op2, vl); + return __riscv_vfsgnj_vv_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf4(op1, op2, vl); + return __riscv_vfsgnj_vf_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnj_vv_f16mf2(op1, op2, vl); + return __riscv_vfsgnj_vv_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf2(op1, op2, vl); + return __riscv_vfsgnj_vf_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnj_vv_f16m1(op1, op2, vl); + return __riscv_vfsgnj_vv_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfsgnj_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m1(op1, op2, vl); + return __riscv_vfsgnj_vf_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnj_vv_f16m2(op1, op2, vl); + return __riscv_vfsgnj_vv_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m2(op1, op2, vl); + return __riscv_vfsgnj_vf_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnj_vv_f16m4(op1, op2, vl); + return __riscv_vfsgnj_vv_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m4(op1, op2, vl); + return __riscv_vfsgnj_vf_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return 
vfsgnj_vv_f16m8(op1, op2, vl); + return __riscv_vfsgnj_vv_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m8(op1, op2, vl); + return __riscv_vfsgnj_vf_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnj_vv_f32mf2(op1, op2, vl); + return __riscv_vfsgnj_vv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32mf2(op1, op2, vl); + return __riscv_vfsgnj_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnj_vv_f32m1(op1, op2, vl); + return __riscv_vfsgnj_vv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m1(op1, op2, vl); + return __riscv_vfsgnj_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnj_vv_f32m2(op1, op2, vl); + return __riscv_vfsgnj_vv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m2(op1, op2, vl); + return __riscv_vfsgnj_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnj_vv_f32m4(op1, op2, vl); + return __riscv_vfsgnj_vv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m4(op1, op2, vl); + return __riscv_vfsgnj_vf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnj_vv_f32m8(op1, op2, vl); + return __riscv_vfsgnj_vv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m8(op1, op2, vl); + return __riscv_vfsgnj_vf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1( @@ -211,7 
+211,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnj_vv_f64m1(op1, op2, vl); + return __riscv_vfsgnj_vv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m1(op1, op2, vl); + return __riscv_vfsgnj_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnj_vv_f64m2(op1, op2, vl); + return __riscv_vfsgnj_vv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m2(op1, op2, vl); + return __riscv_vfsgnj_vf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnj_vv_f64m4(op1, op2, vl); + return __riscv_vfsgnj_vv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m4(op1, op2, vl); + return 
__riscv_vfsgnj_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnj_vv_f64m8(op1, op2, vl); + return __riscv_vfsgnj_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m8(op1, op2, vl); + return __riscv_vfsgnj_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnj_vv_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnj_vv_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_m( @@ -310,7 +310,7 @@ 
vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnj_vv_f16m1_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m1_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnj_vv_f16m2_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m2_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t mask, 
vfloat16m2_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnj_vv_f16m4_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m4_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnj_vv_f16m8_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m8_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnj_vv_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat3 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnj_vv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnj_vv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, 
vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnj_vv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnj_vv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnj_vv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return 
vfsgnj_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnj_vv_f64m2_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnj_vv_f64m4_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnj_vv_f64m8_m(mask, op1, op2, vl); + return 
__riscv_vfsgnj_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn.c index 71caef197519..632611f45921 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjn.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjn_vv_f16mf4(op1, op2, vl); + return __riscv_vfsgnjn_vv_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf4(op1, op2, vl); + return __riscv_vfsgnjn_vf_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjn_vv_f16mf2(op1, op2, vl); + return __riscv_vfsgnjn_vv_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16mf2_t test_vfsgnjn_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf2(op1, op2, vl); + return __riscv_vfsgnjn_vf_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjn_vv_f16m1(op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m1(op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjn_vv_f16m2(op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m2(op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjn_vv_f16m4(op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4( @@ 
-94,7 +94,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m4(op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjn_vv_f16m8(op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m8(op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjn_vv_f32mf2(op1, op2, vl); + return __riscv_vfsgnjn_vv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32mf2(op1, op2, vl); + return __riscv_vfsgnjn_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) 
{ - return vfsgnjn_vv_f32m1(op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m1(op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjn_vv_f32m2(op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m2(op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjn_vv_f32m4(op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m4(op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjn_vv_f32m8(op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m8(op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjn_vv_f64m1(op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m1(op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjn_vv_f64m2(op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m2(op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfsgnjn_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjn_vv_f64m4(op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m4(op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjn_vv_f64m8(op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m8(op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjn_vv_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t 
test_vfsgnjn_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjn_vv_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjn_vv_f16m1_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m1_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t 
test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjn_vv_f16m2_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m2_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjn_vv_f16m4_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m4_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjn_vv_f16m8_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, 
vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m8_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjn_vv_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjn_vv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, 
size_t vl) { - return vfsgnjn_vv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjn_vv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjn_vv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m8_m(mask, op1, 
op2, vl); + return __riscv_vfsgnjn_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjn_vv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjn_vv_f64m2_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjn_vv_f64m4_m(mask, op1, op2, vl); + return 
__riscv_vfsgnjn_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjn_vv_f64m8_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx.c index dfa3e9cbedcd..665dbcc222d9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsgnjx.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjx_vv_f16mf4(op1, op2, vl); + return __riscv_vfsgnjx_vv_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4(vfloat16mf4_t 
op1, vfloat16mf4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf4(op1, op2, vl); + return __riscv_vfsgnjx_vf_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjx_vv_f16mf2(op1, op2, vl); + return __riscv_vfsgnjx_vv_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf2(op1, op2, vl); + return __riscv_vfsgnjx_vf_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjx_vv_f16m1(op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m1(op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjx_vv_f16m2(op1, op2, vl); + return 
__riscv_vfsgnjx_vv_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m2(op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjx_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjx_vv_f16m4(op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjx_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m4(op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjx_vv_f16m8(op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m8(op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfsgnjx_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjx_vv_f32mf2(op1, op2, vl); + return __riscv_vfsgnjx_vv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32mf2(op1, op2, vl); + return __riscv_vfsgnjx_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjx_vv_f32m1(op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m1(op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjx_vv_f32m2(op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m2(op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4( @@ 
-175,7 +175,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjx_vv_f32m4(op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m4(op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjx_vv_f32m8(op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m8(op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjx_vv_f64m1(op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { - return 
vfsgnjx_vf_f64m1(op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjx_vv_f64m2(op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m2(op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjx_vv_f64m4(op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m4(op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjx_vv_f64m8(op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m8(op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjx_vv_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjx_vv_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t 
test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjx_vv_f16m1_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m1_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjx_vv_f16m2_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m2_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjx_vv_f16m4_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, 
vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m4_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjx_vv_f16m8_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m8_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjx_vv_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t 
op2, size_t vl) { - return vfsgnjx_vv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjx_vv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjx_vv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m4_m(mask, 
op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjx_vv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjx_vv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjx_vv_f64m2_m(mask, op1, op2, vl); + return 
__riscv_vfsgnjx_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjx_vv_f64m4_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjx_vv_f64m8_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m8_m(mask, op1, op2, vl); } 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down.c index 9496f2568bc4..12d0dcf48293 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1down.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1down_vf_f16mf4(vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf4(src, value, vl); + return __riscv_vfslide1down_vf_f16mf4(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4(vfloat16mf4_t src, _Float16 value, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1down_vf_f16mf2(vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf2(src, value, vl); + return __riscv_vfslide1down_vf_f16mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2(vfloat16mf2_t src, _Float16 value, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1down_vf_f16m1(vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m1(src, value, vl); + return __riscv_vfslide1down_vf_f16m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1(vfloat16m1_t src, _Float16 value, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1down_vf_f16m2(vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m2(src, value, vl); + return __riscv_vfslide1down_vf_f16m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2(vfloat16m2_t src, _Float16 value, size_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1down_vf_f16m4(vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m4(src, value, vl); + return __riscv_vfslide1down_vf_f16m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4(vfloat16m4_t src, _Float16 value, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfslide1down_vf_f16m8(vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m8(src, value, vl); + return __riscv_vfslide1down_vf_f16m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfslide1down_vf_f16m8(vfloat16m8_t src, _Float16 value, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value, size_t vl) { - return vfslide1down_vf_f32mf2(src, value, vl); + return __riscv_vfslide1down_vf_f32mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value, size_t vl) { - return vfslide1down_vf_f32m1(src, value, vl); + return __riscv_vfslide1down_vf_f32m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value, size_t vl) { - return vfslide1down_vf_f32m2(src, value, vl); + return __riscv_vfslide1down_vf_f32m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value, size_t 
vl) { - return vfslide1down_vf_f32m4(src, value, vl); + return __riscv_vfslide1down_vf_f32m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value, size_t vl) { - return vfslide1down_vf_f32m8(src, value, vl); + return __riscv_vfslide1down_vf_f32m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value, size_t vl) { - return vfslide1down_vf_f64m1(src, value, vl); + return __riscv_vfslide1down_vf_f64m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, size_t vl) { - return vfslide1down_vf_f64m2(src, value, vl); + return __riscv_vfslide1down_vf_f64m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, size_t vl) { - return vfslide1down_vf_f64m4(src, value, vl); + return __riscv_vfslide1down_vf_f64m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, size_t vl) { - return vfslide1down_vf_f64m8(src, value, vl); + return __riscv_vfslide1down_vf_f64m8(src, value, vl); 
} // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_m( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf4_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f16mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_m( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf2_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f16mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_m( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m1_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f16m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_m( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, _Flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m2_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f16m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_m( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, _Float16 value, size_t vl) { - return 
vfslide1down_vf_f16m4_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f16m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_m( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m8_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f16m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1down_vf_f32mf2_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f32mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float value, size_t vl) { - return vfslide1down_vf_f32m1_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f32m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float value, size_t vl) { - return vfslide1down_vf_f32m2_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f32m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t 
test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float value, size_t vl) { - return vfslide1down_vf_f32m4_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f32m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float value, size_t vl) { - return vfslide1down_vf_f32m8_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f32m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, double value, size_t vl) { - return vfslide1down_vf_f64m1_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f64m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, doub // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, double value, size_t vl) { - return vfslide1down_vf_f64m2_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f64m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, doub // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, double value, size_t vl) { - return vfslide1down_vf_f64m4_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f64m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m( @@ -274,6 +274,6 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, 
vfloat64m4_t src, doub // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t src, double value, size_t vl) { - return vfslide1down_vf_f64m8_m(mask, src, value, vl); + return __riscv_vfslide1down_vf_f64m8_m(mask, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up.c index 2b9d56e9728a..6673d03ea7d3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfslide1up.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1up_vf_f16mf4(vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf4(src, value, vl); + return __riscv_vfslide1up_vf_f16mf4(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4(vfloat16mf4_t src, _Float16 value, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1up_vf_f16mf2(vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf2(src, value, vl); + return __riscv_vfslide1up_vf_f16mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2(vfloat16mf2_t src, _Float16 value, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1up_vf_f16m1(vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m1(src, value, vl); + return __riscv_vfslide1up_vf_f16m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1(vfloat16m1_t src, _Float16 value, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1up_vf_f16m2(vfloat16m2_t src, _Float16 value, size_t vl) { - return 
vfslide1up_vf_f16m2(src, value, vl); + return __riscv_vfslide1up_vf_f16m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2(vfloat16m2_t src, _Float16 value, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1up_vf_f16m4(vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m4(src, value, vl); + return __riscv_vfslide1up_vf_f16m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4(vfloat16m4_t src, _Float16 value, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfslide1up_vf_f16m8(vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m8(src, value, vl); + return __riscv_vfslide1up_vf_f16m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfslide1up_vf_f16m8(vfloat16m8_t src, _Float16 value, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value, size_t vl) { - return vfslide1up_vf_f32mf2(src, value, vl); + return __riscv_vfslide1up_vf_f32mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value, size_t vl) { - return vfslide1up_vf_f32m1(src, value, vl); + return __riscv_vfslide1up_vf_f32m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value, size_t vl) { - return vfslide1up_vf_f32m2(src, value, vl); + return __riscv_vfslide1up_vf_f32m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4( @@ -94,7 
+94,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value, size_t vl) { - return vfslide1up_vf_f32m4(src, value, vl); + return __riscv_vfslide1up_vf_f32m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value, size_t vl) { - return vfslide1up_vf_f32m8(src, value, vl); + return __riscv_vfslide1up_vf_f32m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, size_t vl) { - return vfslide1up_vf_f64m1(src, value, vl); + return __riscv_vfslide1up_vf_f64m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, size_t vl) { - return vfslide1up_vf_f64m2(src, value, vl); + return __riscv_vfslide1up_vf_f64m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, size_t vl) { - return vfslide1up_vf_f64m4(src, value, vl); + return __riscv_vfslide1up_vf_f64m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, size_t vl) { - return vfslide1up_vf_f64m8(src, value, vl); + return __riscv_vfslide1up_vf_f64m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_m( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1up_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf4_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f16mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_m( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, _Fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1up_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf2_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f16mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_m( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, _Fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1up_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m1_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f16m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_m( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1up_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m2_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f16m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_m( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat16m4_t test_vfslide1up_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m4_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f16m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_m( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfslide1up_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m8_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f16m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfslide1up_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1up_vf_f32mf2_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f32mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float value, size_t vl) { - return vfslide1up_vf_f32m1_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f32m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float value, size_t vl) { - return vfslide1up_vf_f32m2_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f32m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float value, size_t vl) { - return vfslide1up_vf_f32m4_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f32m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float value, size_t vl) { - return vfslide1up_vf_f32m8_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f32m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, double value, size_t vl) { - return vfslide1up_vf_f64m1_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f64m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, double // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, double value, size_t vl) { - return vfslide1up_vf_f64m2_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f64m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, double // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, double value, size_t vl) { - return vfslide1up_vf_f64m4_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f64m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m( @@ -274,6 +274,6 @@ vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t 
mask, vfloat64m4_t src, double // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t src, double value, size_t vl) { - return vfslide1up_vf_f64m8_m(mask, src, value, vl); + return __riscv_vfslide1up_vf_f64m8_m(mask, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsqrt.c index 82206fca5189..ad116c8cd10f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsqrt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsqrt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) { - return vfsqrt_v_f16mf4(op1, vl); + return __riscv_vfsqrt_v_f16mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) { - return vfsqrt_v_f16mf2(op1, vl); + return __riscv_vfsqrt_v_f16mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) { - return vfsqrt_v_f16m1(op1, vl); + return __riscv_vfsqrt_v_f16m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) { - return vfsqrt_v_f16m2(op1, vl); + return __riscv_vfsqrt_v_f16m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) { - return vfsqrt_v_f16m4(op1, vl); + return __riscv_vfsqrt_v_f16m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t op1, size_t vl) { - return vfsqrt_v_f16m8(op1, vl); + return __riscv_vfsqrt_v_f16m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) { - return vfsqrt_v_f32mf2(op1, vl); + return __riscv_vfsqrt_v_f32mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) { - return vfsqrt_v_f32m1(op1, vl); + return __riscv_vfsqrt_v_f32m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) { - return vfsqrt_v_f32m2(op1, vl); + return __riscv_vfsqrt_v_f32m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) { - return vfsqrt_v_f32m4(op1, vl); + return __riscv_vfsqrt_v_f32m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) { - return vfsqrt_v_f32m8(op1, vl); + return __riscv_vfsqrt_v_f32m8(op1, vl); } // CHECK-RV64-LABEL: 
@test_vfsqrt_v_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) { - return vfsqrt_v_f64m1(op1, vl); + return __riscv_vfsqrt_v_f64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) { - return vfsqrt_v_f64m2(op1, vl); + return __riscv_vfsqrt_v_f64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) { - return vfsqrt_v_f64m4(op1, vl); + return __riscv_vfsqrt_v_f64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) { - return vfsqrt_v_f64m8(op1, vl); + return __riscv_vfsqrt_v_f64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_m( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return vfsqrt_v_f16mf4_m(mask, op1, vl); + return __riscv_vfsqrt_v_f16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_m( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return vfsqrt_v_f16mf2_m(mask, op1, vl); + return __riscv_vfsqrt_v_f16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_m( @@ -166,7 +166,7 @@ vfloat16mf2_t 
test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return vfsqrt_v_f16m1_m(mask, op1, vl); + return __riscv_vfsqrt_v_f16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_m( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return vfsqrt_v_f16m2_m(mask, op1, vl); + return __riscv_vfsqrt_v_f16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_m( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return vfsqrt_v_f16m4_m(mask, op1, vl); + return __riscv_vfsqrt_v_f16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_m( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return vfsqrt_v_f16m8_m(mask, op1, vl); + return __riscv_vfsqrt_v_f16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return vfsqrt_v_f32mf2_m(mask, op1, vl); + return __riscv_vfsqrt_v_f32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return 
vfsqrt_v_f32m1_m(mask, op1, vl); + return __riscv_vfsqrt_v_f32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return vfsqrt_v_f32m2_m(mask, op1, vl); + return __riscv_vfsqrt_v_f32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return vfsqrt_v_f32m4_m(mask, op1, vl); + return __riscv_vfsqrt_v_f32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return vfsqrt_v_f32m8_m(mask, op1, vl); + return __riscv_vfsqrt_v_f32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return vfsqrt_v_f64m1_m(mask, op1, vl); + return __riscv_vfsqrt_v_f64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return vfsqrt_v_f64m2_m(mask, op1, vl); + return __riscv_vfsqrt_v_f64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t 
op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return vfsqrt_v_f64m4_m(mask, op1, vl); + return __riscv_vfsqrt_v_f64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m( @@ -274,6 +274,6 @@ vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return vfsqrt_v_f64m8_m(mask, op1, vl); + return __riscv_vfsqrt_v_f64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub.c index 084808bd165f..be699c5beae6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfsub.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsub_vv_f16mf4(op1, op2, vl); + return __riscv_vfsub_vv_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16mf4(op1, op2, vl); + return __riscv_vfsub_vf_f16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsub_vv_f16mf2(op1, op2, vl); + return __riscv_vfsub_vv_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2( @@ -40,7 +40,7 @@ 
vfloat16mf2_t test_vfsub_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16mf2(op1, op2, vl); + return __riscv_vfsub_vf_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsub_vv_f16m1(op1, op2, vl); + return __riscv_vfsub_vv_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfsub_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m1(op1, op2, vl); + return __riscv_vfsub_vf_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfsub_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsub_vv_f16m2(op1, op2, vl); + return __riscv_vfsub_vv_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfsub_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m2(op1, op2, vl); + return __riscv_vfsub_vf_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfsub_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsub_vv_f16m4(op1, op2, vl); + return __riscv_vfsub_vv_f16m4(op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfsub_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m4(op1, op2, vl); + return __riscv_vfsub_vf_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfsub_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsub_vv_f16m8(op1, op2, vl); + return __riscv_vfsub_vv_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfsub_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m8(op1, op2, vl); + return __riscv_vfsub_vf_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfsub_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsub_vv_f32mf2(op1, op2, vl); + return __riscv_vfsub_vv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { - return vfsub_vf_f32mf2(op1, op2, vl); + return __riscv_vfsub_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, 
size_t vl) { - return vfsub_vv_f32m1(op1, op2, vl); + return __riscv_vfsub_vv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { - return vfsub_vf_f32m1(op1, op2, vl); + return __riscv_vfsub_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsub_vv_f32m2(op1, op2, vl); + return __riscv_vfsub_vv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { - return vfsub_vf_f32m2(op1, op2, vl); + return __riscv_vfsub_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsub_vv_f32m4(op1, op2, vl); + return __riscv_vfsub_vv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { - return vfsub_vf_f32m4(op1, op2, vl); + return __riscv_vfsub_vf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsub_vv_f32m8(op1, op2, vl); + return __riscv_vfsub_vv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { - return vfsub_vf_f32m8(op1, op2, vl); + return __riscv_vfsub_vf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsub_vv_f64m1(op1, op2, vl); + return __riscv_vfsub_vv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { - return vfsub_vf_f64m1(op1, op2, vl); + return __riscv_vfsub_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsub_vv_f64m2(op1, op2, vl); + return __riscv_vfsub_vv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { - return vfsub_vf_f64m2(op1, op2, vl); + return __riscv_vfsub_vf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t 
op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsub_vv_f64m4(op1, op2, vl); + return __riscv_vfsub_vv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { - return vfsub_vf_f64m4(op1, op2, vl); + return __riscv_vfsub_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsub_vv_f64m8(op1, op2, vl); + return __riscv_vfsub_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { - return vfsub_vf_f64m8(op1, op2, vl); + return __riscv_vfsub_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_m( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsub_vv_f16mf4_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_m( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16mf4_m(mask, op1, op2, vl); + return 
__riscv_vfsub_vf_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_m( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsub_vv_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_m( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16mf2_m(mask, op1, op2, vl); + return __riscv_vfsub_vf_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_m( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsub_vv_f16m1_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_m( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m1_m(mask, op1, op2, vl); + return __riscv_vfsub_vf_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_m( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsub_vv_f16m2_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f16m2_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_m( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m2_m(mask, op1, op2, vl); + return __riscv_vfsub_vf_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_m( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsub_vv_f16m4_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_m( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m4_m(mask, op1, op2, vl); + return __riscv_vfsub_vf_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_m( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsub_vv_f16m8_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_m( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m8_m(mask, op1, op2, vl); + return __riscv_vfsub_vf_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m( @@ -391,7 +391,7 @@ vfloat16m8_t 
test_vfsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsub_vv_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsub_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfsub_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsub_vv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfsub_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfsub_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsub_vv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfsub_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfsub_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsub_vv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfsub_vf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfsub_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsub_vv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vfsub_vf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfsub_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, 
vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsub_vv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vfsub_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfsub_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsub_vv_f64m2_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vfsub_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfsub_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsub_vv_f64m4_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vfsub_vf_f64m4_m(mask, op1, 
op2, vl); + return __riscv_vfsub_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsub_vv_f64m8_m(mask, op1, op2, vl); + return __riscv_vfsub_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m( @@ -544,6 +544,6 @@ vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vfsub_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfsub_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd.c index af38d95971f3..c1e1efc511fc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwadd.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_vv_f32mf2(op1, op2, vl); + return __riscv_vfwadd_vv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32mf2(op1, op2, vl); + return __riscv_vfwadd_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2( @@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_wv_f32mf2(op1, op2, vl); + return __riscv_vfwadd_wv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2( @@ -40,7 +40,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32mf2(op1, op2, vl); + return __riscv_vfwadd_wf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1( @@ -49,7 +49,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_vv_f32m1(op1, op2, vl); + return __riscv_vfwadd_vv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1( @@ -58,7 +58,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m1(op1, op2, vl); + return __riscv_vfwadd_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1( @@ -67,7 +67,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_wv_f32m1(op1, op2, vl); + return __riscv_vfwadd_wv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1( @@ -76,7 +76,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m1(op1, op2, vl); + return __riscv_vfwadd_wf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwadd_vv_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_vv_f32m2(op1, op2, vl); + return __riscv_vfwadd_vv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m2(op1, op2, vl); + return __riscv_vfwadd_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2( @@ -103,7 +103,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_wv_f32m2(op1, op2, vl); + return __riscv_vfwadd_wv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2( @@ -112,7 +112,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m2(op1, op2, vl); + return __riscv_vfwadd_wf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4( @@ -121,7 +121,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_vv_f32m4(op1, op2, vl); + return __riscv_vfwadd_vv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4( @@ -130,7 +130,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return 
vfwadd_vf_f32m4(op1, op2, vl); + return __riscv_vfwadd_vf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4( @@ -139,7 +139,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_wv_f32m4(op1, op2, vl); + return __riscv_vfwadd_wv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4( @@ -148,7 +148,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m4(op1, op2, vl); + return __riscv_vfwadd_wf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8( @@ -157,7 +157,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_vv_f32m8(op1, op2, vl); + return __riscv_vfwadd_vv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8( @@ -166,7 +166,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m8(op1, op2, vl); + return __riscv_vfwadd_vf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8( @@ -175,7 +175,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_wv_f32m8(op1, op2, vl); + return __riscv_vfwadd_wv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8( @@ -184,7 +184,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m8(op1, op2, vl); + return __riscv_vfwadd_wf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1( @@ -193,7 +193,7 @@ vfloat32m8_t test_vfwadd_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_vv_f64m1(op1, op2, vl); + return __riscv_vfwadd_vv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1( @@ -202,7 +202,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m1(op1, op2, vl); + return __riscv_vfwadd_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1( @@ -211,7 +211,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_wv_f64m1(op1, op2, vl); + return __riscv_vfwadd_wv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m1(op1, op2, vl); + return __riscv_vfwadd_wf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_vv_f64m2(op1, op2, vl); + return __riscv_vfwadd_vv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2( @@ 
-238,7 +238,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m2(op1, op2, vl); + return __riscv_vfwadd_vf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_wv_f64m2(op1, op2, vl); + return __riscv_vfwadd_wv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2( @@ -256,7 +256,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m2(op1, op2, vl); + return __riscv_vfwadd_wf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_vv_f64m4(op1, op2, vl); + return __riscv_vfwadd_vv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4( @@ -274,7 +274,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m4(op1, op2, vl); + return __riscv_vfwadd_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4( @@ -283,7 +283,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_wv_f64m4(op1, op2, vl); + return 
__riscv_vfwadd_wv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4( @@ -292,7 +292,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m4(op1, op2, vl); + return __riscv_vfwadd_wf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8( @@ -301,7 +301,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_vv_f64m8(op1, op2, vl); + return __riscv_vfwadd_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8( @@ -310,7 +310,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m8(op1, op2, vl); + return __riscv_vfwadd_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8( @@ -319,7 +319,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_wv_f64m8(op1, op2, vl); + return __riscv_vfwadd_wv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8( @@ -328,7 +328,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m8(op1, op2, vl); + return __riscv_vfwadd_wf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_m( @@ -337,7 +337,7 @@ vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfwadd_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_vv_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfwadd_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_m( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfwadd_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_m( @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_wv_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfwadd_wv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_m( @@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfwadd_wf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_m( @@ -373,7 +373,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_vv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfwadd_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_m( @@ -382,7 +382,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfwadd_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfwadd_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_m( @@ -391,7 +391,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_wv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfwadd_wv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_m( @@ -400,7 +400,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfwadd_wf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_m( @@ -409,7 +409,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_vv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfwadd_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_m( @@ -418,7 +418,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfwadd_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, 
vfloat16m1_t op2, size_t vl) { - return vfwadd_wv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfwadd_wv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfwadd_wf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_vv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfwadd_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfwadd_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_wv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfwadd_wv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_m( @@ -472,7 +472,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m4_m(mask, 
op1, op2, vl); + return __riscv_vfwadd_wf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_m( @@ -481,7 +481,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_vv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfwadd_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_m( @@ -490,7 +490,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfwadd_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_m( @@ -499,7 +499,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_wv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfwadd_wv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_m( @@ -508,7 +508,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfwadd_wf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m( @@ -517,7 +517,7 @@ vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_vv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfwadd_vv_f64m1_m(mask, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m( @@ -526,7 +526,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfwadd_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m( @@ -535,7 +535,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_wv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfwadd_wv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m( @@ -544,7 +544,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfwadd_wf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m( @@ -553,7 +553,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_vv_f64m2_m(mask, op1, op2, vl); + return __riscv_vfwadd_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m( @@ -562,7 +562,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfwadd_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m( @@ 
-571,7 +571,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_wv_f64m2_m(mask, op1, op2, vl); + return __riscv_vfwadd_wv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m( @@ -580,7 +580,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfwadd_wf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m( @@ -589,7 +589,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_vv_f64m4_m(mask, op1, op2, vl); + return __riscv_vfwadd_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m( @@ -598,7 +598,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfwadd_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m( @@ -607,7 +607,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_wv_f64m4_m(mask, op1, op2, vl); + return __riscv_vfwadd_wv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m( @@ -616,7 +616,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t 
mask, vfloat64m4_t op1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfwadd_wf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_m( @@ -625,7 +625,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_vv_f64m8_m(mask, op1, op2, vl); + return __riscv_vfwadd_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m( @@ -634,7 +634,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfwadd_vf_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m( @@ -643,7 +643,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_wv_f64m8_m(mask, op1, op2, vl); + return __riscv_vfwadd_wv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m( @@ -652,6 +652,6 @@ vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfwadd_wf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt.c index e68b28e8b0eb..e9435918f3ef 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4(vint8mf8_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf4(src, vl); + return __riscv_vfwcvt_f_x_v_f16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4(vint8mf8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2(vint8mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf2(src, vl); + return __riscv_vfwcvt_f_x_v_f16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2(vint8mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_x_v_f16m1(vint8mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f16m1(src, vl); + return __riscv_vfwcvt_f_x_v_f16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1(vint8mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_x_v_f16m2(vint8m1_t src, size_t vl) { - return vfwcvt_f_x_v_f16m2(src, vl); + return __riscv_vfwcvt_f_x_v_f16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2(vint8m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfwcvt_f_x_v_f16m4(vint8m2_t src, size_t vl) { - return vfwcvt_f_x_v_f16m4(src, vl); + return __riscv_vfwcvt_f_x_v_f16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4(vint8m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t 
test_vfwcvt_f_x_v_f16m8(vint8m4_t src, size_t vl) { - return vfwcvt_f_x_v_f16m8(src, vl); + return __riscv_vfwcvt_f_x_v_f16m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8(vint8m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4(vuint8mf8_t src, size_t vl) { - return vfwcvt_f_xu_v_f16mf4(src, vl); + return __riscv_vfwcvt_f_xu_v_f16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2( @@ -76,7 +76,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4(vuint8mf8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2(vuint8mf4_t src, size_t vl) { - return vfwcvt_f_xu_v_f16mf2(src, vl); + return __riscv_vfwcvt_f_xu_v_f16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1( @@ -85,7 +85,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2(vuint8mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_xu_v_f16m1(vuint8mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m1(src, vl); + return __riscv_vfwcvt_f_xu_v_f16m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2( @@ -94,7 +94,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1(vuint8mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_xu_v_f16m2(vuint8m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m2(src, vl); + return __riscv_vfwcvt_f_xu_v_f16m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4( @@ -103,7 +103,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2(vuint8m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfwcvt_f_xu_v_f16m4(vuint8m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m4(src, vl); + return __riscv_vfwcvt_f_xu_v_f16m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8( @@ -112,7 +112,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4(vuint8m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfwcvt_f_xu_v_f16m8(vuint8m4_t 
src, size_t vl) { - return vfwcvt_f_xu_v_f16m8(src, vl); + return __riscv_vfwcvt_f_xu_v_f16m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8(vuint8m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { - return vfwcvt_x_f_v_i32mf2(src, vl); + return __riscv_vfwcvt_x_f_v_i32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2( @@ -130,7 +130,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32mf2(src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1( @@ -139,7 +139,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m1(src, vl); + return __riscv_vfwcvt_x_f_v_i32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1( @@ -148,7 +148,7 @@ vint32m1_t test_vfwcvt_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m1(src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2( @@ -157,7 +157,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { - return vfwcvt_x_f_v_i32m2(src, vl); + return __riscv_vfwcvt_x_f_v_i32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2( @@ -166,7 +166,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t 
src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m2(src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4( @@ -175,7 +175,7 @@ vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m4(src, vl); + return __riscv_vfwcvt_x_f_v_i32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4( @@ -184,7 +184,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m4(src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8( @@ -193,7 +193,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { - return vfwcvt_x_f_v_i32m8(src, vl); + return __riscv_vfwcvt_x_f_v_i32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8( @@ -202,7 +202,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m8(src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2( @@ -211,7 +211,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32mf2(src, vl); + return __riscv_vfwcvt_xu_f_v_u32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2( @@ -220,7 +220,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32mf2(src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1( @@ -229,7 +229,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m1(src, vl); + return __riscv_vfwcvt_xu_f_v_u32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1( @@ -238,7 +238,7 @@ vuint32m1_t test_vfwcvt_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m1(src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2( @@ -247,7 +247,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfwcvt_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m2(src, vl); + return __riscv_vfwcvt_xu_f_v_u32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2( @@ -256,7 +256,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m2(src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4( @@ -265,7 +265,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m4(src, vl); + return __riscv_vfwcvt_xu_f_v_u32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4( @@ -274,7 +274,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m4(src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8( @@ -283,7 +283,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfwcvt_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m8(src, vl); + return __riscv_vfwcvt_xu_f_v_u32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8( @@ -292,7 +292,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m8(src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2( @@ -301,7 +301,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f32mf2(src, vl); + return __riscv_vfwcvt_f_x_v_f32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1( @@ -310,7 +310,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m1(src, vl); + return __riscv_vfwcvt_f_x_v_f32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2( @@ -319,7 +319,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) { - return vfwcvt_f_x_v_f32m2(src, vl); + return __riscv_vfwcvt_f_x_v_f32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4( @@ -328,7 +328,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m4(src, vl); + return __riscv_vfwcvt_f_x_v_f32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8( @@ -337,7 +337,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) { - return vfwcvt_f_x_v_f32m8(src, vl); + return __riscv_vfwcvt_f_x_v_f32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2( @@ -346,7 +346,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32mf2(src, vl); + return __riscv_vfwcvt_f_xu_v_f32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1( @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m1(src, vl); + return __riscv_vfwcvt_f_xu_v_f32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2( @@ -364,7 +364,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m2(src, vl); + return __riscv_vfwcvt_f_xu_v_f32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4( @@ -373,7 +373,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m4(src, vl); + return __riscv_vfwcvt_f_xu_v_f32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8( @@ -382,7 +382,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m8(src, vl); + return __riscv_vfwcvt_f_xu_v_f32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2( @@ -391,7 +391,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2(vfloat16mf4_t src, size_t vl) { - return vfwcvt_f_f_v_f32mf2(src, vl); + return __riscv_vfwcvt_f_f_v_f32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2(vfloat16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_f_v_f32m1(vfloat16mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m1(src, vl); + return __riscv_vfwcvt_f_f_v_f32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2( @@ -409,7 +409,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_f_v_f32m2(vfloat16m1_t src, size_t vl) { - return vfwcvt_f_f_v_f32m2(src, vl); + return __riscv_vfwcvt_f_f_v_f32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4( @@ -418,7 +418,7 @@ vfloat32m2_t test_vfwcvt_f_f_v_f32m2(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_f_v_f32m4(vfloat16m2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m4(src, vl); + return __riscv_vfwcvt_f_f_v_f32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8( @@ -427,7 +427,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4(vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_f_v_f32m8(vfloat16m4_t src, size_t vl) { - return vfwcvt_f_f_v_f32m8(src, vl); + return __riscv_vfwcvt_f_f_v_f32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1( @@ -436,7 +436,7 @@ vfloat32m8_t test_vfwcvt_f_f_v_f32m8(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m1(src, vl); + return __riscv_vfwcvt_x_f_v_i64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1( @@ -445,7 +445,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m1(src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2( @@ -454,7 +454,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { - return vfwcvt_x_f_v_i64m2(src, vl); + return __riscv_vfwcvt_x_f_v_i64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2( @@ -463,7 +463,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m2(src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4( @@ -472,7 +472,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m4(src, vl); + return __riscv_vfwcvt_x_f_v_i64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4( @@ -481,7 +481,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m4(src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8( @@ -490,7 +490,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { - return vfwcvt_x_f_v_i64m8(src, vl); + return __riscv_vfwcvt_x_f_v_i64m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8( @@ -499,7 +499,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m8(src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1( @@ -508,7 +508,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m1(src, vl); + return __riscv_vfwcvt_xu_f_v_u64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1( @@ -517,7 +517,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m1(src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2( @@ -526,7 +526,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m2(src, vl); + return __riscv_vfwcvt_xu_f_v_u64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2( @@ -535,7 +535,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m2(src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4( @@ -544,7 +544,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m4(src, vl); + return __riscv_vfwcvt_xu_f_v_u64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4( @@ -553,7 +553,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m4(src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8( @@ -562,7 +562,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m8(src, vl); + return __riscv_vfwcvt_xu_f_v_u64m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8( @@ -571,7 +571,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m8(src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1( @@ -580,7 +580,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m1(src, vl); + return __riscv_vfwcvt_f_x_v_f64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2( @@ -589,7 +589,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) { - return vfwcvt_f_x_v_f64m2(src, vl); + return __riscv_vfwcvt_f_x_v_f64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4( @@ -598,7 +598,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m4(src, vl); + return __riscv_vfwcvt_f_x_v_f64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8( @@ -607,7 +607,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) { - return vfwcvt_f_x_v_f64m8(src, vl); + return __riscv_vfwcvt_f_x_v_f64m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1( @@ -616,7 +616,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m1(src, vl); + return __riscv_vfwcvt_f_xu_v_f64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2( @@ -625,7 +625,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m2(src, vl); + return __riscv_vfwcvt_f_xu_v_f64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4( @@ -634,7 +634,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m4(src, vl); + return __riscv_vfwcvt_f_xu_v_f64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8( @@ -643,7 +643,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m8(src, vl); + return __riscv_vfwcvt_f_xu_v_f64m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1( @@ -652,7 +652,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m1(src, vl); + return __riscv_vfwcvt_f_f_v_f64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2( @@ -661,7 +661,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) { - return vfwcvt_f_f_v_f64m2(src, vl); + return __riscv_vfwcvt_f_f_v_f64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4( @@ -670,7 +670,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m4(src, vl); + return __riscv_vfwcvt_f_f_v_f64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8( @@ -679,7 +679,7 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) { - return vfwcvt_f_f_v_f64m8(src, vl); + return __riscv_vfwcvt_f_f_v_f64m8(src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_m( @@ -688,7 +688,7 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf4_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_m( @@ -697,7 +697,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t mask, vint8mf8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf2_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_m( @@ -706,7 +706,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t mask, vint8mf4_t src, 
size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f16m1_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_m( @@ -715,7 +715,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) { - return vfwcvt_f_x_v_f16m2_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_m( @@ -724,7 +724,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) { - return vfwcvt_f_x_v_f16m4_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_m( @@ -733,7 +733,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) { - return vfwcvt_f_x_v_f16m8_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_m( @@ -742,7 +742,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t vl) { - return vfwcvt_f_xu_v_f16mf4_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_m( @@ -751,7 +751,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint8mf8_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t vl) { - 
return vfwcvt_f_xu_v_f16mf2_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_m( @@ -760,7 +760,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint8mf4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint8mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m1_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_m( @@ -769,7 +769,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint8mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m2_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_m( @@ -778,7 +778,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m4_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_m( @@ -787,7 +787,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m8_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_m( @@ -796,7 +796,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return vfwcvt_x_f_v_i32mf2_m(mask, src, vl); + return __riscv_vfwcvt_x_f_v_i32mf2_m(mask, src, vl); } // 
CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_m( @@ -805,7 +805,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32mf2_m(mask, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_m( @@ -814,7 +814,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m1_m(mask, src, vl); + return __riscv_vfwcvt_x_f_v_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_m( @@ -823,7 +823,7 @@ vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m1_m(mask, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_m( @@ -832,7 +832,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vfwcvt_x_f_v_i32m2_m(mask, src, vl); + return __riscv_vfwcvt_x_f_v_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_m( @@ -841,7 +841,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m2_m(mask, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_m( @@ -850,7 +850,7 @@ vint32m2_t 
test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m4_m(mask, src, vl); + return __riscv_vfwcvt_x_f_v_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_m( @@ -859,7 +859,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m4_m(mask, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_m( @@ -868,7 +868,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vfwcvt_x_f_v_i32m8_m(mask, src, vl); + return __riscv_vfwcvt_x_f_v_i32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_m( @@ -877,7 +877,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m8_m(mask, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_m( @@ -886,7 +886,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32mf2_m(mask, src, vl); + return __riscv_vfwcvt_xu_f_v_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_m( @@ -895,7 +895,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32mf2_m(mask, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_m( @@ -904,7 +904,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m1_m(mask, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_m( @@ -913,7 +913,7 @@ vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m1_m(mask, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_m( @@ -922,7 +922,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m2_m(mask, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_m( @@ -931,7 +931,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m2_m(mask, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_m( @@ -940,7 +940,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, 
vfloat16m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m4_m(mask, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_m( @@ -949,7 +949,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m4_m(mask, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_m( @@ -958,7 +958,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m8_m(mask, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_m( @@ -967,7 +967,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m8_m(mask, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m( @@ -976,7 +976,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f32mf2_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_m( @@ -985,7 +985,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vint16mf4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m1_m(mask, src, vl); + return 
__riscv_vfwcvt_f_x_v_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_m( @@ -994,7 +994,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vint16mf2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) { - return vfwcvt_f_x_v_f32m2_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_m( @@ -1003,7 +1003,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vint16m1_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m4_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_m( @@ -1012,7 +1012,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) { - return vfwcvt_f_x_v_f32m8_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32mf2_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_m( @@ -1030,7 +1030,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint16mf4_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m1_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_m( @@ -1039,7 +1039,7 
@@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint16mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m2_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_m( @@ -1048,7 +1048,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint16m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m4_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_m( @@ -1057,7 +1057,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint16m2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m8_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_m( @@ -1066,7 +1066,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint16m4_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return vfwcvt_f_f_v_f32mf2_m(mask, src, vl); + return __riscv_vfwcvt_f_f_v_f32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_m( @@ -1075,7 +1075,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t mask, vfloat16mf4_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m1_m(mask, src, vl); + return __riscv_vfwcvt_f_f_v_f32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_m( @@ -1084,7 +1084,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return vfwcvt_f_f_v_f32m2_m(mask, src, vl); + return __riscv_vfwcvt_f_f_v_f32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_m( @@ -1093,7 +1093,7 @@ vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat16m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m4_m(mask, src, vl); + return __riscv_vfwcvt_f_f_v_f32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_m( @@ -1102,7 +1102,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat16m2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return vfwcvt_f_f_v_f32m8_m(mask, src, vl); + return __riscv_vfwcvt_f_f_v_f32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_m( @@ -1111,7 +1111,7 @@ vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat16m4_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m1_m(mask, src, vl); + return __riscv_vfwcvt_x_f_v_i64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( @@ -1120,7 +1120,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m1_m(mask, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m( @@ -1129,7 +1129,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return 
vfwcvt_x_f_v_i64m2_m(mask, src, vl); + return __riscv_vfwcvt_x_f_v_i64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( @@ -1138,7 +1138,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m2_m(mask, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m( @@ -1147,7 +1147,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m4_m(mask, src, vl); + return __riscv_vfwcvt_x_f_v_i64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( @@ -1156,7 +1156,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m4_m(mask, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m( @@ -1165,7 +1165,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfwcvt_x_f_v_i64m8_m(mask, src, vl); + return __riscv_vfwcvt_x_f_v_i64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m( @@ -1174,7 +1174,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m8_m(mask, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m8_m(mask, 
src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m( @@ -1183,7 +1183,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m1_m(mask, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( @@ -1192,7 +1192,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m1_m(mask, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m( @@ -1201,7 +1201,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m2_m(mask, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m( @@ -1210,7 +1210,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m2_m(mask, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m( @@ -1219,7 +1219,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m4_m(mask, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( @@ 
-1228,7 +1228,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m4_m(mask, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m( @@ -1237,7 +1237,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m8_m(mask, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( @@ -1246,7 +1246,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m8_m(mask, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m( @@ -1255,7 +1255,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m1_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_m( @@ -1264,7 +1264,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vint32mf2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return vfwcvt_f_x_v_f64m2_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_m( @@ -1273,7 +1273,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vint32m1_t src, 
size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m4_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_m( @@ -1282,7 +1282,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vint32m2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return vfwcvt_f_x_v_f64m8_m(mask, src, vl); + return __riscv_vfwcvt_f_x_v_f64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_m( @@ -1291,7 +1291,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m1_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_m( @@ -1300,7 +1300,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint32mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m2_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_m( @@ -1309,7 +1309,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint32m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m4_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_m( @@ -1318,7 +1318,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint32m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint32m4_t 
src, size_t vl) { - return vfwcvt_f_xu_v_f64m8_m(mask, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_m( @@ -1327,7 +1327,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint32m4_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m1_m(mask, src, vl); + return __riscv_vfwcvt_f_f_v_f64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_m( @@ -1336,7 +1336,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return vfwcvt_f_f_v_f64m2_m(mask, src, vl); + return __riscv_vfwcvt_f_f_v_f64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_m( @@ -1345,7 +1345,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat32m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m4_m(mask, src, vl); + return __riscv_vfwcvt_f_f_v_f64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_m( @@ -1354,6 +1354,6 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat32m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return vfwcvt_f_f_v_f64m8_m(mask, src, vl); + return __riscv_vfwcvt_f_f_v_f64m8_m(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc.c index 183a44fe1dd6..05d46e43e3f0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmacc.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmacc_vv_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmacc_vf_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1( @@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmacc_vv_f32m1(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmacc_vf_f32m1(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmacc_vv_f32m2(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2( @@ -58,7 +58,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2(vfloat32m2_t vd, 
vfloat16m1_t vs1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmacc_vf_f32m2(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4( @@ -67,7 +67,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmacc_vv_f32m4(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4( @@ -76,7 +76,7 @@ vfloat32m4_t test_vfwmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmacc_vf_f32m4(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8( @@ -85,7 +85,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmacc_vv_f32m8(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8( @@ -94,7 +94,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmacc_vf_f32m8(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1( @@ -103,7 +103,7 @@ vfloat32m8_t test_vfwmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vfwmacc_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmacc_vv_f64m1(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmacc_vf_f64m1(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmacc_vv_f64m2(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmacc_vf_f64m2(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4( @@ -139,7 +139,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmacc_vv_f64m4(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4( @@ -148,7 +148,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return 
vfwmacc_vf_f64m4(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8( @@ -157,7 +157,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmacc_vv_f64m8(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8( @@ -166,7 +166,7 @@ vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmacc_vf_f64m8(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_m( @@ -175,7 +175,7 @@ vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_m( @@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmacc_vf_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_m( @@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t 
vl) { - return vfwmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_m( @@ -202,7 +202,7 @@ vfloat32m1_t test_vfwmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmacc_vf_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_m( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmacc_vf_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfwmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t 
test_vfwmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmacc_vf_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_m( @@ -247,7 +247,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_m( @@ -256,7 +256,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmacc_vf_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m( @@ -265,7 +265,7 @@ vfloat32m8_t test_vfwmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmacc_vv_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m( @@ -274,7 +274,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmacc_vf_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m( @@ -283,7 +283,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t 
mask, vfloat64m1_t vd, float vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmacc_vv_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m( @@ -292,7 +292,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmacc_vf_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m( @@ -301,7 +301,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmacc_vv_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m( @@ -310,7 +310,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmacc_vf_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m( @@ -319,7 +319,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfwmacc_vf_f64m8_m( @@ -328,6 +328,6 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmacc_vf_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m8_m(mask, vd, vs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac.c index 111cea227780..2d9817bb45f7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmsac.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmsac_vv_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmsac_vf_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1( @@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmsac_vv_f32m1(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16m // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmsac_vf_f32m1(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmsac_vv_f32m2(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2( @@ -58,7 +58,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmsac_vf_f32m2(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4( @@ -67,7 +67,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmsac_vv_f32m4(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4( @@ -76,7 +76,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmsac_vf_f32m4(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8( @@ -85,7 +85,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, 
vfloat16m4_t vs2, size_t vl) { - return vfwmsac_vv_f32m8(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8( @@ -94,7 +94,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmsac_vf_f32m8(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1( @@ -103,7 +103,7 @@ vfloat32m8_t test_vfwmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmsac_vv_f64m1(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmsac_vf_f64m1(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmsac_vv_f64m2(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmsac_vf_f64m2(vd, vs1, vs2, vl); + return 
__riscv_vfwmsac_vf_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4( @@ -139,7 +139,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmsac_vv_f64m4(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4( @@ -148,7 +148,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmsac_vf_f64m4(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8( @@ -157,7 +157,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmsac_vv_f64m8(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8( @@ -166,7 +166,7 @@ vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmsac_vf_f64m8(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_m( @@ -175,7 +175,7 @@ vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_m( @@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmsac_vf_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_m( @@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_m( @@ -202,7 +202,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmsac_vf_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_m( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return 
vfwmsac_vf_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmsac_vf_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_m( @@ -247,7 +247,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_m( @@ -256,7 +256,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmsac_vf_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m( @@ -265,7 +265,7 @@ vfloat32m8_t test_vfwmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmsac_vv_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m( @@ -274,7 +274,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmsac_vf_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m( @@ -283,7 +283,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmsac_vv_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m( @@ -292,7 +292,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmsac_vf_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m( @@ -301,7 +301,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmsac_vv_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m( @@ -310,7 +310,7 @@ vfloat64m4_t 
test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmsac_vf_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m( @@ -319,7 +319,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m( @@ -328,6 +328,6 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmsac_vf_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m8_m(mask, vd, vs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul.c index a66640c1cf3d..c5050a08b5a8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwmul.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwmul_vv_f32mf2(op1, op2, vl); + return __riscv_vfwmul_vv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32mf2_t test_vfwmul_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32mf2(op1, op2, vl); + return __riscv_vfwmul_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1( @@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwmul_vv_f32m1(op1, op2, vl); + return __riscv_vfwmul_vv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m1(op1, op2, vl); + return __riscv_vfwmul_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwmul_vv_f32m2(op1, op2, vl); + return __riscv_vfwmul_vv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2( @@ -58,7 +58,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m2(op1, op2, vl); + return __riscv_vfwmul_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4( @@ -67,7 +67,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwmul_vv_f32m4(op1, op2, vl); + return __riscv_vfwmul_vv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4( @@ -76,7 +76,7 @@ vfloat32m4_t 
test_vfwmul_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m4(op1, op2, vl); + return __riscv_vfwmul_vf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8( @@ -85,7 +85,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwmul_vv_f32m8(op1, op2, vl); + return __riscv_vfwmul_vv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8( @@ -94,7 +94,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m8(op1, op2, vl); + return __riscv_vfwmul_vf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1( @@ -103,7 +103,7 @@ vfloat32m8_t test_vfwmul_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwmul_vv_f64m1(op1, op2, vl); + return __riscv_vfwmul_vv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m1(op1, op2, vl); + return __riscv_vfwmul_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwmul_vv_f64m2(op1, op2, vl); + return 
__riscv_vfwmul_vv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m2(op1, op2, vl); + return __riscv_vfwmul_vf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4( @@ -139,7 +139,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwmul_vv_f64m4(op1, op2, vl); + return __riscv_vfwmul_vv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4( @@ -148,7 +148,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m4(op1, op2, vl); + return __riscv_vfwmul_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8( @@ -157,7 +157,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwmul_vv_f64m8(op1, op2, vl); + return __riscv_vfwmul_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8( @@ -166,7 +166,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m8(op1, op2, vl); + return __riscv_vfwmul_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_m( @@ -175,7 +175,7 @@ vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfwmul_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwmul_vv_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfwmul_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_m( @@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfwmul_vf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_m( @@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwmul_vv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfwmul_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_m( @@ -202,7 +202,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfwmul_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_m( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwmul_vv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfwmul_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t mask, 
vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfwmul_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwmul_vv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfwmul_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfwmul_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_m( @@ -247,7 +247,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwmul_vv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfwmul_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_m( @@ -256,7 +256,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfwmul_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m( @@ -265,7 +265,7 @@ vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return 
vfwmul_vv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfwmul_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m( @@ -274,7 +274,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfwmul_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m( @@ -283,7 +283,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwmul_vv_f64m2_m(mask, op1, op2, vl); + return __riscv_vfwmul_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m( @@ -292,7 +292,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfwmul_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m( @@ -301,7 +301,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwmul_vv_f64m4_m(mask, op1, op2, vl); + return __riscv_vfwmul_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m( @@ -310,7 +310,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m4_m(mask, op1, op2, vl); + return 
__riscv_vfwmul_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m( @@ -319,7 +319,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwmul_vv_f64m8_m(mask, op1, op2, vl); + return __riscv_vfwmul_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m( @@ -328,6 +328,6 @@ vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfwmul_vf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc.c index 0b1e01e62d98..495a431a7554 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmacc.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmacc_vv_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmacc_vf_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1( @@ -31,7 +31,7 @@ vfloat32mf2_t 
test_vfwnmacc_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmacc_vv_f32m1(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmacc_vf_f32m1(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmacc_vv_f32m2(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2( @@ -58,7 +58,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmacc_vf_f32m2(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4( @@ -67,7 +67,7 @@ vfloat32m2_t test_vfwnmacc_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmacc_vv_f32m4(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4( @@ -76,7 +76,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmacc_vf_f32m4(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8( @@ -85,7 +85,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmacc_vv_f32m8(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8( @@ -94,7 +94,7 @@ vfloat32m8_t test_vfwnmacc_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmacc_vf_f32m8(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1( @@ -103,7 +103,7 @@ vfloat32m8_t test_vfwnmacc_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmacc_vv_f64m1(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmacc_vf_f64m1(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t 
vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmacc_vv_f64m2(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmacc_vf_f64m2(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4( @@ -139,7 +139,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmacc_vv_f64m4(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4( @@ -148,7 +148,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmacc_vf_f64m4(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8( @@ -157,7 +157,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmacc_vv_f64m8(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8( @@ -166,7 +166,7 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmacc_vf_f64m8(vd, 
vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_m( @@ -175,7 +175,7 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_m( @@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmacc_vf_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_m( @@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_m( @@ -202,7 +202,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmacc_vf_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_m( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t 
test_vfwnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmacc_vf_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfwnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmacc_vf_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_m( @@ -247,7 +247,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_m( @@ -256,7 +256,7 @@ vfloat32m8_t 
test_vfwnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmacc_vf_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m( @@ -265,7 +265,7 @@ vfloat32m8_t test_vfwnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmacc_vv_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m( @@ -274,7 +274,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmacc_vf_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m( @@ -283,7 +283,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmacc_vv_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m( @@ -292,7 +292,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmacc_vf_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m2_m(mask, 
vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m( @@ -301,7 +301,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmacc_vv_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m( @@ -310,7 +310,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmacc_vf_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m( @@ -319,7 +319,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m( @@ -328,6 +328,6 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmacc_vf_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m8_m(mask, vd, vs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac.c index 2415d9c189c4..131d9fe13671 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwnmsac.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmsac_vv_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmsac_vf_f32mf2(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1( @@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwnmsac_vf_f32mf2(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmsac_vv_f32m1(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmsac_vf_f32m1(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmsac_vv_f32m2(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m2(vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2( @@ -58,7 +58,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmsac_vf_f32m2(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4( @@ -67,7 +67,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmsac_vv_f32m4(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4( @@ -76,7 +76,7 @@ vfloat32m4_t test_vfwnmsac_vv_f32m4(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmsac_vf_f32m4(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8( @@ -85,7 +85,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmsac_vv_f32m8(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8( @@ -94,7 +94,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmsac_vf_f32m8(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1( @@ -103,7 +103,7 @@ vfloat32m8_t 
test_vfwnmsac_vf_f32m8(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmsac_vv_f64m1(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmsac_vf_f64m1(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmsac_vv_f64m2(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmsac_vf_f64m2(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4( @@ -139,7 +139,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmsac_vv_f64m4(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4( @@ -148,7 +148,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmsac_vf_f64m4(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8( @@ -157,7 +157,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmsac_vv_f64m8(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8( @@ -166,7 +166,7 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmsac_vf_f64m8(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_m( @@ -175,7 +175,7 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_m( @@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmsac_vf_f32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_m( @@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, 
_Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_m( @@ -202,7 +202,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmsac_vf_f32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_m( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmsac_vf_f32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_m( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfwnmsac_vf_f32m4_m( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfwnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmsac_vf_f32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_m( @@ -247,7 +247,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, _Float16 v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_m( @@ -256,7 +256,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmsac_vf_f32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m( @@ -265,7 +265,7 @@ vfloat32m8_t test_vfwnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, _Float16 v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmsac_vv_f64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m( @@ -274,7 +274,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmsac_vf_f64m1_m(mask, vd, 
vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m( @@ -283,7 +283,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmsac_vv_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m( @@ -292,7 +292,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmsac_vf_f64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m( @@ -301,7 +301,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmsac_vv_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m( @@ -310,7 +310,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmsac_vf_f64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m( @@ -319,7 +319,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, 
vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m( @@ -328,6 +328,6 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmsac_vf_f64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m8_m(mask, vd, vs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c index 12b07480a48a..096f2bfba47a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf4_f32m1(vector, scalar, vl); + return __riscv_vfwredosum_vs_f16mf4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1( @@ -22,7 +22,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf2_f32m1(vector, scalar, vl); + return __riscv_vfwredosum_vs_f16mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1( @@ -31,7 +31,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t 
scalar, size_t vl) { - return vfwredosum_vs_f16m1_f32m1(vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m2_f32m1(vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m4_f32m1(vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1( @@ -58,7 +58,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m8_f32m1(vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1( @@ -67,7 +67,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32mf2_f64m1(vector, scalar, vl); + return __riscv_vfwredosum_vs_f32mf2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1( @@ -76,7 +76,7 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m1_f64m1(vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1( @@ -85,7 +85,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m2_f64m1(vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1( @@ -94,7 +94,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m4_f64m1(vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1( @@ -103,7 +103,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m8_f64m1(vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m8_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_m( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf4_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16mf4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_m( @@ -121,7 +121,7 @@ vfloat32m1_t 
test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf2_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16mf2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_m( @@ -130,7 +130,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m1_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m1_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_m( @@ -139,7 +139,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m2_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_m( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m4_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_m( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t 
scalar, size_t vl) { - return vfwredosum_vs_f16m8_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m8_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( @@ -166,7 +166,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32mf2_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfwredosum_vs_f32mf2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( @@ -175,7 +175,7 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m1_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m1_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m( @@ -184,7 +184,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m2_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( @@ -193,7 +193,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m4_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m4_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vfwredosum_vs_f32m8_f64m1_m( @@ -202,6 +202,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m8_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m8_f64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c index a5a02db975c0..f48660dcbf51 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf4_f32m1(vector, scalar, vl); + return __riscv_vfwredusum_vs_f16mf4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1( @@ -22,7 +22,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf2_f32m1(vector, scalar, vl); + return __riscv_vfwredusum_vs_f16mf2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1( @@ -31,7 +31,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m1_f32m1(vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m1_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vfwredusum_vs_f16m2_f32m1( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m2_f32m1(vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m2_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m4_f32m1(vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m4_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1( @@ -58,7 +58,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m8_f32m1(vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m8_f32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1( @@ -67,7 +67,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32mf2_f64m1(vector, scalar, vl); + return __riscv_vfwredusum_vs_f32mf2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1( @@ -76,7 +76,7 @@ vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m1_f64m1(vector, scalar, vl); + return 
__riscv_vfwredusum_vs_f32m1_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1( @@ -85,7 +85,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m2_f64m1(vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m2_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1( @@ -94,7 +94,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m4_f64m1(vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m4_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1( @@ -103,7 +103,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m8_f64m1(vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m8_f64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_m( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t sc // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf4_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16mf4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_m( @@ -121,7 +121,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, 
vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf2_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16mf2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_m( @@ -130,7 +130,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m1_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m1_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_m( @@ -139,7 +139,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m2_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m2_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_m( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m4_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m4_f32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_m( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m8_f32m1_m(mask, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m8_f32m1_m(mask, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m( @@ -166,7 +166,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32mf2_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfwredusum_vs_f32mf2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m( @@ -175,7 +175,7 @@ vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m1_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m1_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m( @@ -184,7 +184,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m2_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m2_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m( @@ -193,7 +193,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m4_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m4_f64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m( @@ -202,6 +202,6 @@ vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m8_f64m1_m(mask, vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m8_f64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub.c index 48f1193a943b..d0cb0199465e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwsub.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_vv_f32mf2(op1, op2, vl); + return __riscv_vfwsub_vv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32mf2(op1, op2, vl); + return __riscv_vfwsub_vf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2( @@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2(vfloat16mf4_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_wv_f32mf2(op1, op2, vl); + return __riscv_vfwsub_wv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2( @@ -40,7 +40,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32mf2(op1, op2, vl); + return __riscv_vfwsub_wf_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwsub_vv_f32m1( @@ -49,7 +49,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2(vfloat32mf2_t op1, _Float16 op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_vv_f32m1(op1, op2, vl); + return __riscv_vfwsub_vv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1( @@ -58,7 +58,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m1(op1, op2, vl); + return __riscv_vfwsub_vf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1( @@ -67,7 +67,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_wv_f32m1(op1, op2, vl); + return __riscv_vfwsub_wv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1( @@ -76,7 +76,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m1(op1, op2, vl); + return __riscv_vfwsub_wf_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1(vfloat32m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_vv_f32m2(op1, op2, vl); + return __riscv_vfwsub_vv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return 
vfwsub_vf_f32m2(op1, op2, vl); + return __riscv_vfwsub_vf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2( @@ -103,7 +103,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_wv_f32m2(op1, op2, vl); + return __riscv_vfwsub_wv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2( @@ -112,7 +112,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m2(op1, op2, vl); + return __riscv_vfwsub_wf_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4( @@ -121,7 +121,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2(vfloat32m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_vv_f32m4(op1, op2, vl); + return __riscv_vfwsub_vv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4( @@ -130,7 +130,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m4(op1, op2, vl); + return __riscv_vfwsub_vf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4( @@ -139,7 +139,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_wv_f32m4(op1, op2, vl); + return __riscv_vfwsub_wv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4( @@ -148,7 +148,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m4(op1, op2, vl); + return __riscv_vfwsub_wf_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8( @@ -157,7 +157,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4(vfloat32m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_vv_f32m8(op1, op2, vl); + return __riscv_vfwsub_vv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8( @@ -166,7 +166,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m8(op1, op2, vl); + return __riscv_vfwsub_vf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8( @@ -175,7 +175,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_wv_f32m8(op1, op2, vl); + return __riscv_vfwsub_wv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8( @@ -184,7 +184,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m8(op1, op2, vl); + return __riscv_vfwsub_wf_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1( @@ -193,7 +193,7 @@ vfloat32m8_t test_vfwsub_wf_f32m8(vfloat32m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_vv_f64m1(op1, op2, vl); + return __riscv_vfwsub_vv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwsub_vf_f64m1( @@ -202,7 +202,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m1(op1, op2, vl); + return __riscv_vfwsub_vf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1( @@ -211,7 +211,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_wv_f64m1(op1, op2, vl); + return __riscv_vfwsub_wv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m1(op1, op2, vl); + return __riscv_vfwsub_wf_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_vv_f64m2(op1, op2, vl); + return __riscv_vfwsub_vv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m2(op1, op2, vl); + return __riscv_vfwsub_vf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return 
vfwsub_wv_f64m2(op1, op2, vl); + return __riscv_vfwsub_wv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2( @@ -256,7 +256,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m2(op1, op2, vl); + return __riscv_vfwsub_wf_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_vv_f64m4(op1, op2, vl); + return __riscv_vfwsub_vv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4( @@ -274,7 +274,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m4(op1, op2, vl); + return __riscv_vfwsub_vf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4( @@ -283,7 +283,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_wv_f64m4(op1, op2, vl); + return __riscv_vfwsub_wv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4( @@ -292,7 +292,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m4(op1, op2, vl); + return __riscv_vfwsub_wf_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8( @@ -301,7 +301,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_vv_f64m8(op1, op2, vl); + return __riscv_vfwsub_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8( @@ -310,7 +310,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m8(op1, op2, vl); + return __riscv_vfwsub_vf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8( @@ -319,7 +319,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_wv_f64m8(op1, op2, vl); + return __riscv_vfwsub_wv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8( @@ -328,7 +328,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m8(op1, op2, vl); + return __riscv_vfwsub_wf_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_m( @@ -337,7 +337,7 @@ vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_vv_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfwsub_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_m( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfwsub_vf_f32mf2_m(mask, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_m( @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_wv_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfwsub_wv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_m( @@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32mf2_m(mask, op1, op2, vl); + return __riscv_vfwsub_wf_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_m( @@ -373,7 +373,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_vv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfwsub_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_m( @@ -382,7 +382,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfwsub_vf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_m( @@ -391,7 +391,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_wv_f32m1_m(mask, op1, op2, vl); + return __riscv_vfwsub_wv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwsub_wf_f32m1_m( @@ -400,7 +400,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m1_m(mask, op1, op2, vl); + return __riscv_vfwsub_wf_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_m( @@ -409,7 +409,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_vv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfwsub_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_m( @@ -418,7 +418,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfwsub_vf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_m( @@ -427,7 +427,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_wv_f32m2_m(mask, op1, op2, vl); + return __riscv_vfwsub_wv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_m( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m2_m(mask, op1, op2, vl); + return __riscv_vfwsub_wf_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_m( @@ -445,7 +445,7 @@ vfloat32m2_t 
test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_vv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfwsub_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_m( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfwsub_vf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_m( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_wv_f32m4_m(mask, op1, op2, vl); + return __riscv_vfwsub_wv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_m( @@ -472,7 +472,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m4_m(mask, op1, op2, vl); + return __riscv_vfwsub_wf_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_m( @@ -481,7 +481,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_vv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfwsub_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_m( @@ -490,7 +490,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, 
vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfwsub_vf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_m( @@ -499,7 +499,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_wv_f32m8_m(mask, op1, op2, vl); + return __riscv_vfwsub_wv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_m( @@ -508,7 +508,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m8_m(mask, op1, op2, vl); + return __riscv_vfwsub_wf_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m( @@ -517,7 +517,7 @@ vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_vv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfwsub_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m( @@ -526,7 +526,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfwsub_vf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m( @@ -535,7 +535,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_wv_f64m1_m(mask, op1, op2, vl); + return __riscv_vfwsub_wv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m( @@ -544,7 +544,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m1_m(mask, op1, op2, vl); + return __riscv_vfwsub_wf_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m( @@ -553,7 +553,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_vv_f64m2_m(mask, op1, op2, vl); + return __riscv_vfwsub_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m( @@ -562,7 +562,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfwsub_vf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m( @@ -571,7 +571,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_wv_f64m2_m(mask, op1, op2, vl); + return __riscv_vfwsub_wv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m( @@ -580,7 +580,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float 
op2, size_t vl) { - return vfwsub_wf_f64m2_m(mask, op1, op2, vl); + return __riscv_vfwsub_wf_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m( @@ -589,7 +589,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_vv_f64m4_m(mask, op1, op2, vl); + return __riscv_vfwsub_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m( @@ -598,7 +598,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfwsub_vf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m( @@ -607,7 +607,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_wv_f64m4_m(mask, op1, op2, vl); + return __riscv_vfwsub_wv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m( @@ -616,7 +616,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m4_m(mask, op1, op2, vl); + return __riscv_vfwsub_wf_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m( @@ -625,7 +625,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_vv_f64m8_m(mask, op1, op2, 
vl); + return __riscv_vfwsub_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m( @@ -634,7 +634,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfwsub_vf_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m( @@ -643,7 +643,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_wv_f64m8_m(mask, op1, op2, vl); + return __riscv_vfwsub_wv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m( @@ -652,6 +652,6 @@ vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m8_m(mask, op1, op2, vl); + return __riscv_vfwsub_wf_f64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vget.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vget.c index 88ad24bcbeb6..8d5b08e76eaa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vget.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vget.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vget_v_f16m2_f16m1(vfloat16m2_t src, size_t index) { - return vget_v_f16m2_f16m1(src, 0); + return __riscv_vget_v_f16m2_f16m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m1( @@ -22,7 +22,7 @@ vfloat16m1_t test_vget_v_f16m2_f16m1(vfloat16m2_t src, size_t index) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m1_t test_vget_v_f16m4_f16m1(vfloat16m4_t src, size_t index) { - return vget_v_f16m4_f16m1(src, 0); + return __riscv_vget_v_f16m4_f16m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m1( @@ -31,7 +31,7 @@ vfloat16m1_t test_vget_v_f16m4_f16m1(vfloat16m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vget_v_f16m8_f16m1(vfloat16m8_t src, size_t index) { - return vget_v_f16m8_f16m1(src, 0); + return __riscv_vget_v_f16m8_f16m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vget_v_f16m8_f16m1(vfloat16m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vget_v_f16m4_f16m2(vfloat16m4_t src, size_t index) { - return vget_v_f16m4_f16m2(src, 0); + return __riscv_vget_v_f16m4_f16m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m2( @@ -49,7 +49,7 @@ vfloat16m2_t test_vget_v_f16m4_f16m2(vfloat16m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vget_v_f16m8_f16m2(vfloat16m8_t src, size_t index) { - return vget_v_f16m8_f16m2(src, 0); + return __riscv_vget_v_f16m8_f16m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m4( @@ -58,7 +58,7 @@ vfloat16m2_t test_vget_v_f16m8_f16m2(vfloat16m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vget_v_f16m8_f16m4(vfloat16m8_t src, size_t index) { - return vget_v_f16m8_f16m4(src, 0); + return __riscv_vget_v_f16m8_f16m4(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1( @@ -67,7 +67,7 @@ vfloat16m4_t test_vget_v_f16m8_f16m4(vfloat16m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) { - return vget_v_f32m2_f32m1(src, 0); + return __riscv_vget_v_f32m2_f32m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1( @@ -76,7 +76,7 @@ vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) { - return vget_v_f32m4_f32m1(src, 0); + return __riscv_vget_v_f32m4_f32m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1( @@ -85,7 +85,7 @@ vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) { - return vget_v_f32m8_f32m1(src, 0); + return __riscv_vget_v_f32m8_f32m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2( @@ -94,7 +94,7 @@ vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) { - return vget_v_f32m4_f32m2(src, 0); + return __riscv_vget_v_f32m4_f32m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2( @@ -103,7 +103,7 @@ vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) { - return vget_v_f32m8_f32m2(src, 0); + return __riscv_vget_v_f32m8_f32m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4( @@ -112,7 +112,7 @@ vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) { - return vget_v_f32m8_f32m4(src, 0); + return __riscv_vget_v_f32m8_f32m4(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1( @@ -121,7 +121,7 @@ vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) { - return vget_v_f64m2_f64m1(src, 0); + return __riscv_vget_v_f64m2_f64m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1( @@ -130,7 +130,7 @@ vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) { - return vget_v_f64m4_f64m1(src, 0); + return __riscv_vget_v_f64m4_f64m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1( @@ -139,7 +139,7 @@ vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) { - return vget_v_f64m8_f64m1(src, 0); + return __riscv_vget_v_f64m8_f64m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2( @@ -148,7 +148,7 @@ vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) { - return vget_v_f64m4_f64m2(src, 0); + return __riscv_vget_v_f64m4_f64m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2( @@ -157,7 +157,7 @@ vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) { - return vget_v_f64m8_f64m2(src, 0); + return __riscv_vget_v_f64m8_f64m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4( @@ -166,7 +166,7 @@ vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src, size_t index) { - return vget_v_f64m8_f64m4(src, 0); + return __riscv_vget_v_f64m8_f64m4(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1( @@ -175,7 +175,7 @@ vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) { - return vget_v_i8m2_i8m1(src, 0); + return __riscv_vget_v_i8m2_i8m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1( @@ -184,7 +184,7 @@ vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) { 
- return vget_v_i8m4_i8m1(src, 0); + return __riscv_vget_v_i8m4_i8m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1( @@ -193,7 +193,7 @@ vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) { - return vget_v_i8m8_i8m1(src, 0); + return __riscv_vget_v_i8m8_i8m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2( @@ -202,7 +202,7 @@ vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) { - return vget_v_i8m4_i8m2(src, 0); + return __riscv_vget_v_i8m4_i8m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2( @@ -211,7 +211,7 @@ vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) { - return vget_v_i8m8_i8m2(src, 0); + return __riscv_vget_v_i8m8_i8m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4( @@ -220,7 +220,7 @@ vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) { - return vget_v_i8m8_i8m4(src, 0); + return __riscv_vget_v_i8m8_i8m4(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1( @@ -229,7 +229,7 @@ vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) { - return vget_v_i16m2_i16m1(src, 0); + return __riscv_vget_v_i16m2_i16m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1( @@ -238,7 +238,7 @@ vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) { - return vget_v_i16m4_i16m1(src, 0); + return __riscv_vget_v_i16m4_i16m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1( @@ 
-247,7 +247,7 @@ vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) { - return vget_v_i16m8_i16m1(src, 0); + return __riscv_vget_v_i16m8_i16m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2( @@ -256,7 +256,7 @@ vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) { - return vget_v_i16m4_i16m2(src, 0); + return __riscv_vget_v_i16m4_i16m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2( @@ -265,7 +265,7 @@ vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) { - return vget_v_i16m8_i16m2(src, 0); + return __riscv_vget_v_i16m8_i16m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4( @@ -274,7 +274,7 @@ vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) { - return vget_v_i16m8_i16m4(src, 0); + return __riscv_vget_v_i16m8_i16m4(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1( @@ -283,7 +283,7 @@ vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) { - return vget_v_i32m2_i32m1(src, 0); + return __riscv_vget_v_i32m2_i32m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1( @@ -292,7 +292,7 @@ vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) { - return vget_v_i32m4_i32m1(src, 0); + return __riscv_vget_v_i32m4_i32m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1( @@ -301,7 +301,7 @@ vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, 
size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) { - return vget_v_i32m8_i32m1(src, 0); + return __riscv_vget_v_i32m8_i32m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2( @@ -310,7 +310,7 @@ vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) { - return vget_v_i32m4_i32m2(src, 0); + return __riscv_vget_v_i32m4_i32m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2( @@ -319,7 +319,7 @@ vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) { - return vget_v_i32m8_i32m2(src, 0); + return __riscv_vget_v_i32m8_i32m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4( @@ -328,7 +328,7 @@ vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) { - return vget_v_i32m8_i32m4(src, 0); + return __riscv_vget_v_i32m8_i32m4(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1( @@ -337,7 +337,7 @@ vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) { - return vget_v_i64m2_i64m1(src, 0); + return __riscv_vget_v_i64m2_i64m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1( @@ -346,7 +346,7 @@ vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) { - return vget_v_i64m4_i64m1(src, 0); + return __riscv_vget_v_i64m4_i64m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1( @@ -355,7 +355,7 @@ vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) { - return vget_v_i64m8_i64m1(src, 0); + return __riscv_vget_v_i64m8_i64m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2( @@ -364,7 +364,7 @@ vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) { - return vget_v_i64m4_i64m2(src, 0); + return __riscv_vget_v_i64m4_i64m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2( @@ -373,7 +373,7 @@ vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) { - return vget_v_i64m8_i64m2(src, 0); + return __riscv_vget_v_i64m8_i64m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4( @@ -382,7 +382,7 @@ vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) { - return vget_v_i64m8_i64m4(src, 0); + return __riscv_vget_v_i64m8_i64m4(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1( @@ -391,7 +391,7 @@ vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) { - return vget_v_u8m2_u8m1(src, 0); + return __riscv_vget_v_u8m2_u8m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1( @@ -400,7 +400,7 @@ vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) { - return vget_v_u8m4_u8m1(src, 0); + return __riscv_vget_v_u8m4_u8m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1( @@ -409,7 +409,7 @@ vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) { - return vget_v_u8m8_u8m1(src, 0); + 
return __riscv_vget_v_u8m8_u8m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2( @@ -418,7 +418,7 @@ vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) { - return vget_v_u8m4_u8m2(src, 0); + return __riscv_vget_v_u8m4_u8m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2( @@ -427,7 +427,7 @@ vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) { - return vget_v_u8m8_u8m2(src, 0); + return __riscv_vget_v_u8m8_u8m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4( @@ -436,7 +436,7 @@ vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) { - return vget_v_u8m8_u8m4(src, 0); + return __riscv_vget_v_u8m8_u8m4(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1( @@ -445,7 +445,7 @@ vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) { - return vget_v_u16m2_u16m1(src, 0); + return __riscv_vget_v_u16m2_u16m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1( @@ -454,7 +454,7 @@ vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) { - return vget_v_u16m4_u16m1(src, 0); + return __riscv_vget_v_u16m4_u16m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1( @@ -463,7 +463,7 @@ vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) { - return vget_v_u16m8_u16m1(src, 0); + return __riscv_vget_v_u16m8_u16m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2( @@ 
-472,7 +472,7 @@ vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) { - return vget_v_u16m4_u16m2(src, 0); + return __riscv_vget_v_u16m4_u16m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2( @@ -481,7 +481,7 @@ vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) { - return vget_v_u16m8_u16m2(src, 0); + return __riscv_vget_v_u16m8_u16m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4( @@ -490,7 +490,7 @@ vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) { - return vget_v_u16m8_u16m4(src, 0); + return __riscv_vget_v_u16m8_u16m4(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1( @@ -499,7 +499,7 @@ vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) { - return vget_v_u32m2_u32m1(src, 0); + return __riscv_vget_v_u32m2_u32m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1( @@ -508,7 +508,7 @@ vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) { - return vget_v_u32m4_u32m1(src, 0); + return __riscv_vget_v_u32m4_u32m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1( @@ -517,7 +517,7 @@ vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) { - return vget_v_u32m8_u32m1(src, 0); + return __riscv_vget_v_u32m8_u32m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2( @@ -526,7 +526,7 @@ vuint32m1_t 
test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) { - return vget_v_u32m4_u32m2(src, 0); + return __riscv_vget_v_u32m4_u32m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2( @@ -535,7 +535,7 @@ vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) { - return vget_v_u32m8_u32m2(src, 0); + return __riscv_vget_v_u32m8_u32m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4( @@ -544,7 +544,7 @@ vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) { - return vget_v_u32m8_u32m4(src, 0); + return __riscv_vget_v_u32m8_u32m4(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1( @@ -553,7 +553,7 @@ vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) { - return vget_v_u64m2_u64m1(src, 0); + return __riscv_vget_v_u64m2_u64m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1( @@ -562,7 +562,7 @@ vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) { - return vget_v_u64m4_u64m1(src, 0); + return __riscv_vget_v_u64m4_u64m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1( @@ -571,7 +571,7 @@ vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) { - return vget_v_u64m8_u64m1(src, 0); + return __riscv_vget_v_u64m8_u64m1(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2( @@ -580,7 +580,7 @@ vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t 
index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) { - return vget_v_u64m4_u64m2(src, 0); + return __riscv_vget_v_u64m4_u64m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2( @@ -589,7 +589,7 @@ vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) { - return vget_v_u64m8_u64m2(src, 0); + return __riscv_vget_v_u64m8_u64m2(src, 0); } // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4( @@ -598,6 +598,6 @@ vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) { - return vget_v_u64m8_u64m4(src, 0); + return __riscv_vget_v_u64m8_u64m4(src, 0); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vid.c index ebac7044a319..b5f268237e06 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vid.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vid.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vid_v_u8mf8(size_t vl) { - return vid_v_u8mf8(vl); + return __riscv_vid_v_u8mf8(vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf4( @@ -21,7 +21,7 @@ vuint8mf8_t test_vid_v_u8mf8(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vid_v_u8mf4(size_t vl) { - return vid_v_u8mf4(vl); + return __riscv_vid_v_u8mf4(vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf2( @@ -30,7 +30,7 @@ vuint8mf4_t test_vid_v_u8mf4(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vid_v_u8mf2(size_t vl) { - return vid_v_u8mf2(vl); + return __riscv_vid_v_u8mf2(vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m1( @@ -39,7 +39,7 @@ vuint8mf2_t test_vid_v_u8mf2(size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vid_v_u8m1(size_t vl) { - return vid_v_u8m1(vl); + return __riscv_vid_v_u8m1(vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m2( @@ -48,7 +48,7 @@ vuint8m1_t test_vid_v_u8m1(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vid_v_u8m2(size_t vl) { - return vid_v_u8m2(vl); + return __riscv_vid_v_u8m2(vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m4( @@ -57,7 +57,7 @@ vuint8m2_t test_vid_v_u8m2(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vid_v_u8m4(size_t vl) { - return vid_v_u8m4(vl); + return __riscv_vid_v_u8m4(vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m8( @@ -66,7 +66,7 @@ vuint8m4_t test_vid_v_u8m4(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vid_v_u8m8(size_t vl) { - return vid_v_u8m8(vl); + return __riscv_vid_v_u8m8(vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf4( @@ -75,7 +75,7 @@ vuint8m8_t test_vid_v_u8m8(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vid_v_u16mf4(size_t vl) { - return vid_v_u16mf4(vl); + return __riscv_vid_v_u16mf4(vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf2( @@ -84,7 +84,7 @@ vuint16mf4_t test_vid_v_u16mf4(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vid_v_u16mf2(size_t vl) { - return vid_v_u16mf2(vl); + return __riscv_vid_v_u16mf2(vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m1( @@ -93,7 +93,7 @@ vuint16mf2_t test_vid_v_u16mf2(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vid_v_u16m1(size_t vl) { - return vid_v_u16m1(vl); + return __riscv_vid_v_u16m1(vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m2( @@ -102,7 +102,7 @@ vuint16m1_t test_vid_v_u16m1(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vid_v_u16m2(size_t vl) { - return vid_v_u16m2(vl); + return __riscv_vid_v_u16m2(vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m4( @@ -111,7 +111,7 @@ vuint16m2_t test_vid_v_u16m2(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vid_v_u16m4(size_t vl) { - return 
vid_v_u16m4(vl); + return __riscv_vid_v_u16m4(vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m8( @@ -120,7 +120,7 @@ vuint16m4_t test_vid_v_u16m4(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vid_v_u16m8(size_t vl) { - return vid_v_u16m8(vl); + return __riscv_vid_v_u16m8(vl); } // CHECK-RV64-LABEL: @test_vid_v_u32mf2( @@ -129,7 +129,7 @@ vuint16m8_t test_vid_v_u16m8(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vid_v_u32mf2(size_t vl) { - return vid_v_u32mf2(vl); + return __riscv_vid_v_u32mf2(vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m1( @@ -138,7 +138,7 @@ vuint32mf2_t test_vid_v_u32mf2(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vid_v_u32m1(size_t vl) { - return vid_v_u32m1(vl); + return __riscv_vid_v_u32m1(vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m2( @@ -147,7 +147,7 @@ vuint32m1_t test_vid_v_u32m1(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vid_v_u32m2(size_t vl) { - return vid_v_u32m2(vl); + return __riscv_vid_v_u32m2(vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m4( @@ -156,7 +156,7 @@ vuint32m2_t test_vid_v_u32m2(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vid_v_u32m4(size_t vl) { - return vid_v_u32m4(vl); + return __riscv_vid_v_u32m4(vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m8( @@ -165,7 +165,7 @@ vuint32m4_t test_vid_v_u32m4(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vid_v_u32m8(size_t vl) { - return vid_v_u32m8(vl); + return __riscv_vid_v_u32m8(vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m1( @@ -174,7 +174,7 @@ vuint32m8_t test_vid_v_u32m8(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vid_v_u64m1(size_t vl) { - return vid_v_u64m1(vl); + return __riscv_vid_v_u64m1(vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m2( @@ -183,7 +183,7 @@ vuint64m1_t test_vid_v_u64m1(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vid_v_u64m2(size_t vl) { - return vid_v_u64m2(vl); + return __riscv_vid_v_u64m2(vl); } // 
CHECK-RV64-LABEL: @test_vid_v_u64m4( @@ -192,7 +192,7 @@ vuint64m2_t test_vid_v_u64m2(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vid_v_u64m4(size_t vl) { - return vid_v_u64m4(vl); + return __riscv_vid_v_u64m4(vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m8( @@ -201,7 +201,7 @@ vuint64m4_t test_vid_v_u64m4(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vid_v_u64m8(size_t vl) { - return vid_v_u64m8(vl); + return __riscv_vid_v_u64m8(vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf8_m( @@ -210,7 +210,7 @@ vuint64m8_t test_vid_v_u64m8(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, size_t vl) { - return vid_v_u8mf8_m(mask, vl); + return __riscv_vid_v_u8mf8_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf4_m( @@ -219,7 +219,7 @@ vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, size_t vl) { - return vid_v_u8mf4_m(mask, vl); + return __riscv_vid_v_u8mf4_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf2_m( @@ -228,7 +228,7 @@ vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, size_t vl) { - return vid_v_u8mf2_m(mask, vl); + return __riscv_vid_v_u8mf2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m1_m( @@ -237,7 +237,7 @@ vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, size_t vl) { - return vid_v_u8m1_m(mask, vl); + return __riscv_vid_v_u8m1_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m2_m( @@ -246,7 +246,7 @@ vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, size_t vl) { - return vid_v_u8m2_m(mask, vl); + return __riscv_vid_v_u8m2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m4_m( @@ -255,7 +255,7 @@ vuint8m2_t 
test_vid_v_u8m2_m(vbool4_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, size_t vl) { - return vid_v_u8m4_m(mask, vl); + return __riscv_vid_v_u8m4_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m8_m( @@ -264,7 +264,7 @@ vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, size_t vl) { - return vid_v_u8m8_m(mask, vl); + return __riscv_vid_v_u8m8_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf4_m( @@ -273,7 +273,7 @@ vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, size_t vl) { - return vid_v_u16mf4_m(mask, vl); + return __riscv_vid_v_u16mf4_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf2_m( @@ -282,7 +282,7 @@ vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, size_t vl) { - return vid_v_u16mf2_m(mask, vl); + return __riscv_vid_v_u16mf2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m1_m( @@ -291,7 +291,7 @@ vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, size_t vl) { - return vid_v_u16m1_m(mask, vl); + return __riscv_vid_v_u16m1_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m2_m( @@ -300,7 +300,7 @@ vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, size_t vl) { - return vid_v_u16m2_m(mask, vl); + return __riscv_vid_v_u16m2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m4_m( @@ -309,7 +309,7 @@ vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, size_t vl) { - return vid_v_u16m4_m(mask, vl); + return __riscv_vid_v_u16m4_m(mask, vl); } // 
CHECK-RV64-LABEL: @test_vid_v_u16m8_m( @@ -318,7 +318,7 @@ vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, size_t vl) { - return vid_v_u16m8_m(mask, vl); + return __riscv_vid_v_u16m8_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32mf2_m( @@ -327,7 +327,7 @@ vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, size_t vl) { - return vid_v_u32mf2_m(mask, vl); + return __riscv_vid_v_u32mf2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m1_m( @@ -336,7 +336,7 @@ vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, size_t vl) { - return vid_v_u32m1_m(mask, vl); + return __riscv_vid_v_u32m1_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m2_m( @@ -345,7 +345,7 @@ vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, size_t vl) { - return vid_v_u32m2_m(mask, vl); + return __riscv_vid_v_u32m2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m4_m( @@ -354,7 +354,7 @@ vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, size_t vl) { - return vid_v_u32m4_m(mask, vl); + return __riscv_vid_v_u32m4_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m8_m( @@ -363,7 +363,7 @@ vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, size_t vl) { - return vid_v_u32m8_m(mask, vl); + return __riscv_vid_v_u32m8_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m1_m( @@ -372,7 +372,7 @@ vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, size_t vl) { - return 
vid_v_u64m1_m(mask, vl); + return __riscv_vid_v_u64m1_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m2_m( @@ -381,7 +381,7 @@ vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, size_t vl) { - return vid_v_u64m2_m(mask, vl); + return __riscv_vid_v_u64m2_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m4_m( @@ -390,7 +390,7 @@ vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, size_t vl) { - return vid_v_u64m4_m(mask, vl); + return __riscv_vid_v_u64m4_m(mask, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m8_m( @@ -399,6 +399,6 @@ vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, size_t vl) { - return vid_v_u64m8_m(mask, vl); + return __riscv_vid_v_u64m8_m(mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/viota.c index 9c05ca026d21..44758b535d4b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/viota.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/viota.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_viota_m_u8mf8(vbool64_t op1, size_t vl) { - return viota_m_u8mf8(op1, vl); + return __riscv_viota_m_u8mf8(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf4( @@ -21,7 +21,7 @@ vuint8mf8_t test_viota_m_u8mf8(vbool64_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_viota_m_u8mf4(vbool32_t op1, size_t vl) { - return viota_m_u8mf4(op1, vl); + return __riscv_viota_m_u8mf4(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf2( @@ -30,7 +30,7 @@ vuint8mf4_t test_viota_m_u8mf4(vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf2_t test_viota_m_u8mf2(vbool16_t op1, size_t vl) { - return viota_m_u8mf2(op1, vl); + return __riscv_viota_m_u8mf2(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m1( @@ -39,7 +39,7 @@ vuint8mf2_t test_viota_m_u8mf2(vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_viota_m_u8m1(vbool8_t op1, size_t vl) { - return viota_m_u8m1(op1, vl); + return __riscv_viota_m_u8m1(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m2( @@ -48,7 +48,7 @@ vuint8m1_t test_viota_m_u8m1(vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_viota_m_u8m2(vbool4_t op1, size_t vl) { - return viota_m_u8m2(op1, vl); + return __riscv_viota_m_u8m2(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m4( @@ -57,7 +57,7 @@ vuint8m2_t test_viota_m_u8m2(vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_viota_m_u8m4(vbool2_t op1, size_t vl) { - return viota_m_u8m4(op1, vl); + return __riscv_viota_m_u8m4(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m8( @@ -66,7 +66,7 @@ vuint8m4_t test_viota_m_u8m4(vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_viota_m_u8m8(vbool1_t op1, size_t vl) { - return viota_m_u8m8(op1, vl); + return __riscv_viota_m_u8m8(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf4( @@ -75,7 +75,7 @@ vuint8m8_t test_viota_m_u8m8(vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_viota_m_u16mf4(vbool64_t op1, size_t vl) { - return viota_m_u16mf4(op1, vl); + return __riscv_viota_m_u16mf4(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf2( @@ -84,7 +84,7 @@ vuint16mf4_t test_viota_m_u16mf4(vbool64_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_viota_m_u16mf2(vbool32_t op1, size_t vl) { - return viota_m_u16mf2(op1, vl); + return __riscv_viota_m_u16mf2(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m1( @@ -93,7 +93,7 @@ vuint16mf2_t test_viota_m_u16mf2(vbool32_t op1, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_viota_m_u16m1(vbool16_t op1, size_t vl) { - return viota_m_u16m1(op1, vl); + return __riscv_viota_m_u16m1(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m2( @@ -102,7 +102,7 @@ vuint16m1_t test_viota_m_u16m1(vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_viota_m_u16m2(vbool8_t op1, size_t vl) { - return viota_m_u16m2(op1, vl); + return __riscv_viota_m_u16m2(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m4( @@ -111,7 +111,7 @@ vuint16m2_t test_viota_m_u16m2(vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_viota_m_u16m4(vbool4_t op1, size_t vl) { - return viota_m_u16m4(op1, vl); + return __riscv_viota_m_u16m4(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m8( @@ -120,7 +120,7 @@ vuint16m4_t test_viota_m_u16m4(vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_viota_m_u16m8(vbool2_t op1, size_t vl) { - return viota_m_u16m8(op1, vl); + return __riscv_viota_m_u16m8(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32mf2( @@ -129,7 +129,7 @@ vuint16m8_t test_viota_m_u16m8(vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_viota_m_u32mf2(vbool64_t op1, size_t vl) { - return viota_m_u32mf2(op1, vl); + return __riscv_viota_m_u32mf2(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m1( @@ -138,7 +138,7 @@ vuint32mf2_t test_viota_m_u32mf2(vbool64_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_viota_m_u32m1(vbool32_t op1, size_t vl) { - return viota_m_u32m1(op1, vl); + return __riscv_viota_m_u32m1(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m2( @@ -147,7 +147,7 @@ vuint32m1_t test_viota_m_u32m1(vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_viota_m_u32m2(vbool16_t op1, size_t vl) { - return viota_m_u32m2(op1, vl); + return __riscv_viota_m_u32m2(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m4( @@ -156,7 +156,7 @@ vuint32m2_t 
test_viota_m_u32m2(vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_viota_m_u32m4(vbool8_t op1, size_t vl) { - return viota_m_u32m4(op1, vl); + return __riscv_viota_m_u32m4(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m8( @@ -165,7 +165,7 @@ vuint32m4_t test_viota_m_u32m4(vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_viota_m_u32m8(vbool4_t op1, size_t vl) { - return viota_m_u32m8(op1, vl); + return __riscv_viota_m_u32m8(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m1( @@ -174,7 +174,7 @@ vuint32m8_t test_viota_m_u32m8(vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_viota_m_u64m1(vbool64_t op1, size_t vl) { - return viota_m_u64m1(op1, vl); + return __riscv_viota_m_u64m1(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m2( @@ -183,7 +183,7 @@ vuint64m1_t test_viota_m_u64m1(vbool64_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_viota_m_u64m2(vbool32_t op1, size_t vl) { - return viota_m_u64m2(op1, vl); + return __riscv_viota_m_u64m2(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m4( @@ -192,7 +192,7 @@ vuint64m2_t test_viota_m_u64m2(vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_viota_m_u64m4(vbool16_t op1, size_t vl) { - return viota_m_u64m4(op1, vl); + return __riscv_viota_m_u64m4(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m8( @@ -201,7 +201,7 @@ vuint64m4_t test_viota_m_u64m4(vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_viota_m_u64m8(vbool8_t op1, size_t vl) { - return viota_m_u64m8(op1, vl); + return __riscv_viota_m_u64m8(op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf8_m( @@ -210,7 +210,7 @@ vuint64m8_t test_viota_m_u64m8(vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return viota_m_u8mf8_m(mask, op1, vl); + return __riscv_viota_m_u8mf8_m(mask, op1, vl); } 
// CHECK-RV64-LABEL: @test_viota_m_u8mf4_m( @@ -219,7 +219,7 @@ vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vbool64_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return viota_m_u8mf4_m(mask, op1, vl); + return __riscv_viota_m_u8mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf2_m( @@ -228,7 +228,7 @@ vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return viota_m_u8mf2_m(mask, op1, vl); + return __riscv_viota_m_u8mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m1_m( @@ -237,7 +237,7 @@ vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return viota_m_u8m1_m(mask, op1, vl); + return __riscv_viota_m_u8m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m2_m( @@ -246,7 +246,7 @@ vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return viota_m_u8m2_m(mask, op1, vl); + return __riscv_viota_m_u8m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m4_m( @@ -255,7 +255,7 @@ vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return viota_m_u8m4_m(mask, op1, vl); + return __riscv_viota_m_u8m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m8_m( @@ -264,7 +264,7 @@ vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return viota_m_u8m8_m(mask, op1, vl); + return 
__riscv_viota_m_u8m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf4_m( @@ -273,7 +273,7 @@ vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return viota_m_u16mf4_m(mask, op1, vl); + return __riscv_viota_m_u16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf2_m( @@ -282,7 +282,7 @@ vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vbool64_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return viota_m_u16mf2_m(mask, op1, vl); + return __riscv_viota_m_u16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m1_m( @@ -291,7 +291,7 @@ vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return viota_m_u16m1_m(mask, op1, vl); + return __riscv_viota_m_u16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m2_m( @@ -300,7 +300,7 @@ vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return viota_m_u16m2_m(mask, op1, vl); + return __riscv_viota_m_u16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m4_m( @@ -309,7 +309,7 @@ vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return viota_m_u16m4_m(mask, op1, vl); + return __riscv_viota_m_u16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m8_m( @@ -318,7 +318,7 @@ vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, 
vbool2_t op1, size_t vl) { - return viota_m_u16m8_m(mask, op1, vl); + return __riscv_viota_m_u16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32mf2_m( @@ -327,7 +327,7 @@ vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return viota_m_u32mf2_m(mask, op1, vl); + return __riscv_viota_m_u32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m1_m( @@ -336,7 +336,7 @@ vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vbool64_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return viota_m_u32m1_m(mask, op1, vl); + return __riscv_viota_m_u32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m2_m( @@ -345,7 +345,7 @@ vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return viota_m_u32m2_m(mask, op1, vl); + return __riscv_viota_m_u32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m4_m( @@ -354,7 +354,7 @@ vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return viota_m_u32m4_m(mask, op1, vl); + return __riscv_viota_m_u32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m8_m( @@ -363,7 +363,7 @@ vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return viota_m_u32m8_m(mask, op1, vl); + return __riscv_viota_m_u32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m1_m( @@ -372,7 +372,7 @@ vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vbool4_t op1, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return viota_m_u64m1_m(mask, op1, vl); + return __riscv_viota_m_u64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m2_m( @@ -381,7 +381,7 @@ vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vbool64_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return viota_m_u64m2_m(mask, op1, vl); + return __riscv_viota_m_u64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m4_m( @@ -390,7 +390,7 @@ vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return viota_m_u64m4_m(mask, op1, vl); + return __riscv_viota_m_u64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m8_m( @@ -399,6 +399,6 @@ vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return viota_m_u64m8_m(mask, op1, vl); + return __riscv_viota_m_u64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16.c index 1fdd680d7f8a..a90be62d7392 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vle16_v_f16mf4(const _Float16 *base, size_t vl) { - return vle16_v_f16mf4(base, vl); + return __riscv_vle16_v_f16mf4(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vle16_v_f16mf4(const _Float16 *base, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vle16_v_f16mf2(const _Float16 *base, size_t vl) { - return vle16_v_f16mf2(base, vl); + return __riscv_vle16_v_f16mf2(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vle16_v_f16mf2(const _Float16 *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vle16_v_f16m1(const _Float16 *base, size_t vl) { - return vle16_v_f16m1(base, vl); + return __riscv_vle16_v_f16m1(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vle16_v_f16m1(const _Float16 *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vle16_v_f16m2(const _Float16 *base, size_t vl) { - return vle16_v_f16m2(base, vl); + return __riscv_vle16_v_f16m2(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vle16_v_f16m2(const _Float16 *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vle16_v_f16m4(const _Float16 *base, size_t vl) { - return vle16_v_f16m4(base, vl); + return __riscv_vle16_v_f16m4(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vle16_v_f16m4(const _Float16 *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vle16_v_f16m8(const _Float16 *base, size_t vl) { - return vle16_v_f16m8(base, vl); + return __riscv_vle16_v_f16m8(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf4( @@ -67,7 +67,7 @@ vfloat16m8_t test_vle16_v_f16m8(const _Float16 *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vle16_v_i16mf4(const int16_t *base, size_t vl) { - return vle16_v_i16mf4(base, vl); + return __riscv_vle16_v_i16mf4(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf2( @@ -76,7 +76,7 @@ vint16mf4_t test_vle16_v_i16mf4(const int16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vle16_v_i16mf2(const int16_t *base, size_t vl) { - return vle16_v_i16mf2(base, vl); + return 
__riscv_vle16_v_i16mf2(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m1( @@ -85,7 +85,7 @@ vint16mf2_t test_vle16_v_i16mf2(const int16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vle16_v_i16m1(const int16_t *base, size_t vl) { - return vle16_v_i16m1(base, vl); + return __riscv_vle16_v_i16m1(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m2( @@ -94,7 +94,7 @@ vint16m1_t test_vle16_v_i16m1(const int16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vle16_v_i16m2(const int16_t *base, size_t vl) { - return vle16_v_i16m2(base, vl); + return __riscv_vle16_v_i16m2(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m4( @@ -103,7 +103,7 @@ vint16m2_t test_vle16_v_i16m2(const int16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vle16_v_i16m4(const int16_t *base, size_t vl) { - return vle16_v_i16m4(base, vl); + return __riscv_vle16_v_i16m4(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m8( @@ -112,7 +112,7 @@ vint16m4_t test_vle16_v_i16m4(const int16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vle16_v_i16m8(const int16_t *base, size_t vl) { - return vle16_v_i16m8(base, vl); + return __riscv_vle16_v_i16m8(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf4( @@ -121,7 +121,7 @@ vint16m8_t test_vle16_v_i16m8(const int16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vle16_v_u16mf4(const uint16_t *base, size_t vl) { - return vle16_v_u16mf4(base, vl); + return __riscv_vle16_v_u16mf4(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf2( @@ -130,7 +130,7 @@ vuint16mf4_t test_vle16_v_u16mf4(const uint16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vle16_v_u16mf2(const uint16_t *base, size_t vl) { - return vle16_v_u16mf2(base, vl); + return __riscv_vle16_v_u16mf2(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m1( @@ -139,7 +139,7 @@ vuint16mf2_t test_vle16_v_u16mf2(const uint16_t *base, size_t 
vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vle16_v_u16m1(const uint16_t *base, size_t vl) { - return vle16_v_u16m1(base, vl); + return __riscv_vle16_v_u16m1(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m2( @@ -148,7 +148,7 @@ vuint16m1_t test_vle16_v_u16m1(const uint16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vle16_v_u16m2(const uint16_t *base, size_t vl) { - return vle16_v_u16m2(base, vl); + return __riscv_vle16_v_u16m2(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m4( @@ -157,7 +157,7 @@ vuint16m2_t test_vle16_v_u16m2(const uint16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vle16_v_u16m4(const uint16_t *base, size_t vl) { - return vle16_v_u16m4(base, vl); + return __riscv_vle16_v_u16m4(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m8( @@ -166,7 +166,7 @@ vuint16m4_t test_vle16_v_u16m4(const uint16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vle16_v_u16m8(const uint16_t *base, size_t vl) { - return vle16_v_u16m8(base, vl); + return __riscv_vle16_v_u16m8(base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16mf4_m( @@ -175,7 +175,7 @@ vuint16m8_t test_vle16_v_u16m8(const uint16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, size_t vl) { - return vle16_v_f16mf4_m(mask, base, vl); + return __riscv_vle16_v_f16mf4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16mf2_m( @@ -184,7 +184,7 @@ vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, size_t vl) { - return vle16_v_f16mf2_m(mask, base, vl); + return __riscv_vle16_v_f16mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m1_m( @@ -193,7 +193,7 @@ vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, size_t // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t mask, const _Float16 *base, size_t vl) { - return vle16_v_f16m1_m(mask, base, vl); + return __riscv_vle16_v_f16m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m2_m( @@ -202,7 +202,7 @@ vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t mask, const _Float16 *base, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t mask, const _Float16 *base, size_t vl) { - return vle16_v_f16m2_m(mask, base, vl); + return __riscv_vle16_v_f16m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m4_m( @@ -211,7 +211,7 @@ vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t mask, const _Float16 *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t mask, const _Float16 *base, size_t vl) { - return vle16_v_f16m4_m(mask, base, vl); + return __riscv_vle16_v_f16m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m8_m( @@ -220,7 +220,7 @@ vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t mask, const _Float16 *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t mask, const _Float16 *base, size_t vl) { - return vle16_v_f16m8_m(mask, base, vl); + return __riscv_vle16_v_f16m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m( @@ -229,7 +229,7 @@ vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t mask, const _Float16 *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t vl) { - return vle16_v_i16mf4_m(mask, base, vl); + return __riscv_vle16_v_i16mf4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m( @@ -238,7 +238,7 @@ vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t vl) { - return vle16_v_i16mf2_m(mask, base, vl); + return __riscv_vle16_v_i16mf2_m(mask, base, vl); } // 
CHECK-RV64-LABEL: @test_vle16_v_i16m1_m( @@ -247,7 +247,7 @@ vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t vl) { - return vle16_v_i16m1_m(mask, base, vl); + return __riscv_vle16_v_i16m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m2_m( @@ -256,7 +256,7 @@ vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t vl) { - return vle16_v_i16m2_m(mask, base, vl); + return __riscv_vle16_v_i16m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m4_m( @@ -265,7 +265,7 @@ vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t vl) { - return vle16_v_i16m4_m(mask, base, vl); + return __riscv_vle16_v_i16m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m8_m( @@ -274,7 +274,7 @@ vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t vl) { - return vle16_v_i16m8_m(mask, base, vl); + return __riscv_vle16_v_i16m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m( @@ -283,7 +283,7 @@ vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t vl) { - return vle16_v_u16mf4_m(mask, base, vl); + return __riscv_vle16_v_u16mf4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m( @@ -292,7 +292,7 @@ vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vle16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_t vl) { - return vle16_v_u16mf2_m(mask, base, vl); + return __riscv_vle16_v_u16mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m1_m( @@ -301,7 +301,7 @@ vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t vl) { - return vle16_v_u16m1_m(mask, base, vl); + return __riscv_vle16_v_u16m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m2_m( @@ -310,7 +310,7 @@ vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t vl) { - return vle16_v_u16m2_m(mask, base, vl); + return __riscv_vle16_v_u16m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m4_m( @@ -319,7 +319,7 @@ vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t vl) { - return vle16_v_u16m4_m(mask, base, vl); + return __riscv_vle16_v_u16m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m8_m( @@ -328,6 +328,6 @@ vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, const uint16_t *base, size_t vl) { - return vle16_v_u16m8_m(mask, base, vl); + return __riscv_vle16_v_u16m8_m(mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16ff.c index e74b4e312b5e..e7e3b6371403 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle16ff.c @@ -16,7 +16,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vle16ff_v_f16mf4(const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf4(base, new_vl, vl); + return __riscv_vle16ff_v_f16mf4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2( @@ -28,7 +28,7 @@ vfloat16mf4_t test_vle16ff_v_f16mf4(const _Float16 *base, size_t *new_vl, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vle16ff_v_f16mf2(const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf2(base, new_vl, vl); + return __riscv_vle16ff_v_f16mf2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m1( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vle16ff_v_f16mf2(const _Float16 *base, size_t *new_vl, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vle16ff_v_f16m1(const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m1(base, new_vl, vl); + return __riscv_vle16ff_v_f16m1(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m2( @@ -52,7 +52,7 @@ vfloat16m1_t test_vle16ff_v_f16m1(const _Float16 *base, size_t *new_vl, size_t v // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vle16ff_v_f16m2(const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m2(base, new_vl, vl); + return __riscv_vle16ff_v_f16m2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m4( @@ -64,7 +64,7 @@ vfloat16m2_t test_vle16ff_v_f16m2(const _Float16 *base, size_t *new_vl, size_t v // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vle16ff_v_f16m4(const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m4(base, new_vl, vl); + return __riscv_vle16ff_v_f16m4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m8( @@ -76,7 +76,7 @@ vfloat16m4_t test_vle16ff_v_f16m4(const _Float16 *base, size_t *new_vl, size_t v // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t 
test_vle16ff_v_f16m8(const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m8(base, new_vl, vl); + return __riscv_vle16ff_v_f16m8(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4( @@ -88,7 +88,7 @@ vfloat16m8_t test_vle16ff_v_f16m8(const _Float16 *base, size_t *new_vl, size_t v // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vle16ff_v_i16mf4(const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf4(base, new_vl, vl); + return __riscv_vle16ff_v_i16mf4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2( @@ -100,7 +100,7 @@ vint16mf4_t test_vle16ff_v_i16mf4(const int16_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vle16ff_v_i16mf2(const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf2(base, new_vl, vl); + return __riscv_vle16ff_v_i16mf2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1( @@ -112,7 +112,7 @@ vint16mf2_t test_vle16ff_v_i16mf2(const int16_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vle16ff_v_i16m1(const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m1(base, new_vl, vl); + return __riscv_vle16ff_v_i16m1(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2( @@ -124,7 +124,7 @@ vint16m1_t test_vle16ff_v_i16m1(const int16_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vle16ff_v_i16m2(const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m2(base, new_vl, vl); + return __riscv_vle16ff_v_i16m2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4( @@ -136,7 +136,7 @@ vint16m2_t test_vle16ff_v_i16m2(const int16_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vle16ff_v_i16m4(const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m4(base, new_vl, vl); + return __riscv_vle16ff_v_i16m4(base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vle16ff_v_i16m8( @@ -148,7 +148,7 @@ vint16m4_t test_vle16ff_v_i16m4(const int16_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vle16ff_v_i16m8(const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m8(base, new_vl, vl); + return __riscv_vle16ff_v_i16m8(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4( @@ -160,7 +160,7 @@ vint16m8_t test_vle16ff_v_i16m8(const int16_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vle16ff_v_u16mf4(const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf4(base, new_vl, vl); + return __riscv_vle16ff_v_u16mf4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2( @@ -172,7 +172,7 @@ vuint16mf4_t test_vle16ff_v_u16mf4(const uint16_t *base, size_t *new_vl, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vle16ff_v_u16mf2(const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf2(base, new_vl, vl); + return __riscv_vle16ff_v_u16mf2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1( @@ -184,7 +184,7 @@ vuint16mf2_t test_vle16ff_v_u16mf2(const uint16_t *base, size_t *new_vl, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vle16ff_v_u16m1(const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m1(base, new_vl, vl); + return __riscv_vle16ff_v_u16m1(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2( @@ -196,7 +196,7 @@ vuint16m1_t test_vle16ff_v_u16m1(const uint16_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vle16ff_v_u16m2(const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m2(base, new_vl, vl); + return __riscv_vle16ff_v_u16m2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4( @@ -208,7 +208,7 @@ vuint16m2_t test_vle16ff_v_u16m2(const uint16_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // 
vuint16m4_t test_vle16ff_v_u16m4(const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m4(base, new_vl, vl); + return __riscv_vle16ff_v_u16m4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8( @@ -220,7 +220,7 @@ vuint16m4_t test_vle16ff_v_u16m4(const uint16_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vle16ff_v_u16m8(const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m8(base, new_vl, vl); + return __riscv_vle16ff_v_u16m8(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4_m( @@ -232,7 +232,7 @@ vuint16m8_t test_vle16ff_v_u16m8(const uint16_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vle16ff_v_f16mf4_m(vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf4_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_f16mf4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_m( @@ -244,7 +244,7 @@ vfloat16mf4_t test_vle16ff_v_f16mf4_m(vbool64_t mask, const _Float16 *base, size // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vle16ff_v_f16mf2_m(vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf2_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_f16mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_m( @@ -256,7 +256,7 @@ vfloat16mf2_t test_vle16ff_v_f16mf2_m(vbool32_t mask, const _Float16 *base, size // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vle16ff_v_f16m1_m(vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m1_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_f16m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_m( @@ -268,7 +268,7 @@ vfloat16m1_t test_vle16ff_v_f16m1_m(vbool16_t mask, const _Float16 *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vle16ff_v_f16m2_m(vbool8_t mask, const 
_Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m2_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_f16m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_m( @@ -280,7 +280,7 @@ vfloat16m2_t test_vle16ff_v_f16m2_m(vbool8_t mask, const _Float16 *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vle16ff_v_f16m4_m(vbool4_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m4_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_f16m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_m( @@ -292,7 +292,7 @@ vfloat16m4_t test_vle16ff_v_f16m4_m(vbool4_t mask, const _Float16 *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vle16ff_v_f16m8_m(vbool2_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m8_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_f16m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_m( @@ -304,7 +304,7 @@ vfloat16m8_t test_vle16ff_v_f16m8_m(vbool2_t mask, const _Float16 *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vle16ff_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf4_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_i16mf4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_m( @@ -316,7 +316,7 @@ vint16mf4_t test_vle16ff_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vle16ff_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf2_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_i16mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_m( @@ -328,7 +328,7 @@ vint16mf2_t test_vle16ff_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vle16ff_v_i16m1_m(vbool16_t mask, const 
int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m1_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_i16m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_m( @@ -340,7 +340,7 @@ vint16m1_t test_vle16ff_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t *n // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vle16ff_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m2_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_i16m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_m( @@ -352,7 +352,7 @@ vint16m2_t test_vle16ff_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t *ne // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vle16ff_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m4_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_i16m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_m( @@ -364,7 +364,7 @@ vint16m4_t test_vle16ff_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t *ne // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vle16ff_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m8_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_i16m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_m( @@ -376,7 +376,7 @@ vint16m8_t test_vle16ff_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t *ne // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vle16ff_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf4_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_u16mf4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_m( @@ -388,7 +388,7 @@ vuint16mf4_t test_vle16ff_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vle16ff_v_u16mf2_m(vbool32_t mask, const 
uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf2_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_u16mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_m( @@ -400,7 +400,7 @@ vuint16mf2_t test_vle16ff_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vle16ff_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m1_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_u16m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_m( @@ -412,7 +412,7 @@ vuint16m1_t test_vle16ff_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vle16ff_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m2_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_u16m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_m( @@ -424,7 +424,7 @@ vuint16m2_t test_vle16ff_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t * // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vle16ff_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m4_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_u16m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_m( @@ -436,6 +436,6 @@ vuint16m4_t test_vle16ff_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t * // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vle16ff_v_u16m8_m(vbool2_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m8_m(mask, base, new_vl, vl); + return __riscv_vle16ff_v_u16m8_m(mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32.c index 0ae87f4b4f70..1af90a42364a 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vle32_v_f32mf2(const float *base, size_t vl) { - return vle32_v_f32mf2(base, vl); + return __riscv_vle32_v_f32mf2(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m1( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vle32_v_f32mf2(const float *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vle32_v_f32m1(const float *base, size_t vl) { - return vle32_v_f32m1(base, vl); + return __riscv_vle32_v_f32m1(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m2( @@ -31,7 +31,7 @@ vfloat32m1_t test_vle32_v_f32m1(const float *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vle32_v_f32m2(const float *base, size_t vl) { - return vle32_v_f32m2(base, vl); + return __riscv_vle32_v_f32m2(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m4( @@ -40,7 +40,7 @@ vfloat32m2_t test_vle32_v_f32m2(const float *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vle32_v_f32m4(const float *base, size_t vl) { - return vle32_v_f32m4(base, vl); + return __riscv_vle32_v_f32m4(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m8( @@ -49,7 +49,7 @@ vfloat32m4_t test_vle32_v_f32m4(const float *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vle32_v_f32m8(const float *base, size_t vl) { - return vle32_v_f32m8(base, vl); + return __riscv_vle32_v_f32m8(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32mf2( @@ -58,7 +58,7 @@ vfloat32m8_t test_vle32_v_f32m8(const float *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vle32_v_i32mf2(const int32_t *base, size_t vl) { - return vle32_v_i32mf2(base, vl); + return __riscv_vle32_v_i32mf2(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m1( @@ -67,7 +67,7 @@ vint32mf2_t test_vle32_v_i32mf2(const 
int32_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vle32_v_i32m1(const int32_t *base, size_t vl) { - return vle32_v_i32m1(base, vl); + return __riscv_vle32_v_i32m1(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m2( @@ -76,7 +76,7 @@ vint32m1_t test_vle32_v_i32m1(const int32_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vle32_v_i32m2(const int32_t *base, size_t vl) { - return vle32_v_i32m2(base, vl); + return __riscv_vle32_v_i32m2(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m4( @@ -85,7 +85,7 @@ vint32m2_t test_vle32_v_i32m2(const int32_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vle32_v_i32m4(const int32_t *base, size_t vl) { - return vle32_v_i32m4(base, vl); + return __riscv_vle32_v_i32m4(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m8( @@ -94,7 +94,7 @@ vint32m4_t test_vle32_v_i32m4(const int32_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vle32_v_i32m8(const int32_t *base, size_t vl) { - return vle32_v_i32m8(base, vl); + return __riscv_vle32_v_i32m8(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32mf2( @@ -103,7 +103,7 @@ vint32m8_t test_vle32_v_i32m8(const int32_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vle32_v_u32mf2(const uint32_t *base, size_t vl) { - return vle32_v_u32mf2(base, vl); + return __riscv_vle32_v_u32mf2(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m1( @@ -112,7 +112,7 @@ vuint32mf2_t test_vle32_v_u32mf2(const uint32_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vle32_v_u32m1(const uint32_t *base, size_t vl) { - return vle32_v_u32m1(base, vl); + return __riscv_vle32_v_u32m1(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m2( @@ -121,7 +121,7 @@ vuint32m1_t test_vle32_v_u32m1(const uint32_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vle32_v_u32m2(const uint32_t *base, size_t vl) { - return vle32_v_u32m2(base, vl); + 
return __riscv_vle32_v_u32m2(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m4( @@ -130,7 +130,7 @@ vuint32m2_t test_vle32_v_u32m2(const uint32_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vle32_v_u32m4(const uint32_t *base, size_t vl) { - return vle32_v_u32m4(base, vl); + return __riscv_vle32_v_u32m4(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m8( @@ -139,7 +139,7 @@ vuint32m4_t test_vle32_v_u32m4(const uint32_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vle32_v_u32m8(const uint32_t *base, size_t vl) { - return vle32_v_u32m8(base, vl); + return __riscv_vle32_v_u32m8(base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32mf2_m( @@ -148,7 +148,7 @@ vuint32m8_t test_vle32_v_u32m8(const uint32_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, const float *base, size_t vl) { - return vle32_v_f32mf2_m(mask, base, vl); + return __riscv_vle32_v_f32mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m1_m( @@ -157,7 +157,7 @@ vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, const float *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, const float *base, size_t vl) { - return vle32_v_f32m1_m(mask, base, vl); + return __riscv_vle32_v_f32m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m2_m( @@ -166,7 +166,7 @@ vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, const float *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, const float *base, size_t vl) { - return vle32_v_f32m2_m(mask, base, vl); + return __riscv_vle32_v_f32m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m4_m( @@ -175,7 +175,7 @@ vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, const float *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, const float *base, size_t vl) { - return 
vle32_v_f32m4_m(mask, base, vl); + return __riscv_vle32_v_f32m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m8_m( @@ -184,7 +184,7 @@ vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, const float *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, const float *base, size_t vl) { - return vle32_v_f32m8_m(mask, base, vl); + return __riscv_vle32_v_f32m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m( @@ -193,7 +193,7 @@ vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, const float *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t vl) { - return vle32_v_i32mf2_m(mask, base, vl); + return __riscv_vle32_v_i32mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m1_m( @@ -202,7 +202,7 @@ vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t vl) { - return vle32_v_i32m1_m(mask, base, vl); + return __riscv_vle32_v_i32m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m2_m( @@ -211,7 +211,7 @@ vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t vl) { - return vle32_v_i32m2_m(mask, base, vl); + return __riscv_vle32_v_i32m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m4_m( @@ -220,7 +220,7 @@ vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t vl) { - return vle32_v_i32m4_m(mask, base, vl); + return __riscv_vle32_v_i32m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m8_m( @@ -229,7 +229,7 @@ vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, const int32_t 
*base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t vl) { - return vle32_v_i32m8_m(mask, base, vl); + return __riscv_vle32_v_i32m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m( @@ -238,7 +238,7 @@ vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t vl) { - return vle32_v_u32mf2_m(mask, base, vl); + return __riscv_vle32_v_u32mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m1_m( @@ -247,7 +247,7 @@ vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t vl) { - return vle32_v_u32m1_m(mask, base, vl); + return __riscv_vle32_v_u32m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m2_m( @@ -256,7 +256,7 @@ vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t vl) { - return vle32_v_u32m2_m(mask, base, vl); + return __riscv_vle32_v_u32m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m4_m( @@ -265,7 +265,7 @@ vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t vl) { - return vle32_v_u32m4_m(mask, base, vl); + return __riscv_vle32_v_u32m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m8_m( @@ -274,6 +274,6 @@ vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, const uint32_t *base, size_t vl) { - return vle32_v_u32m8_m(mask, base, vl); + return 
__riscv_vle32_v_u32m8_m(mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32ff.c index 11ae11c5a9b1..c5d21ec26f2a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle32ff.c @@ -16,7 +16,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vle32ff_v_f32mf2(const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32mf2(base, new_vl, vl); + return __riscv_vle32ff_v_f32mf2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m1( @@ -28,7 +28,7 @@ vfloat32mf2_t test_vle32ff_v_f32mf2(const float *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vle32ff_v_f32m1(const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m1(base, new_vl, vl); + return __riscv_vle32ff_v_f32m1(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m2( @@ -40,7 +40,7 @@ vfloat32m1_t test_vle32ff_v_f32m1(const float *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vle32ff_v_f32m2(const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m2(base, new_vl, vl); + return __riscv_vle32ff_v_f32m2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m4( @@ -52,7 +52,7 @@ vfloat32m2_t test_vle32ff_v_f32m2(const float *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vle32ff_v_f32m4(const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m4(base, new_vl, vl); + return __riscv_vle32ff_v_f32m4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m8( @@ -64,7 +64,7 @@ vfloat32m4_t test_vle32ff_v_f32m4(const float *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t 
test_vle32ff_v_f32m8(const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m8(base, new_vl, vl); + return __riscv_vle32ff_v_f32m8(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2( @@ -76,7 +76,7 @@ vfloat32m8_t test_vle32ff_v_f32m8(const float *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vle32ff_v_i32mf2(const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32mf2(base, new_vl, vl); + return __riscv_vle32ff_v_i32mf2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1( @@ -88,7 +88,7 @@ vint32mf2_t test_vle32ff_v_i32mf2(const int32_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vle32ff_v_i32m1(const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m1(base, new_vl, vl); + return __riscv_vle32ff_v_i32m1(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2( @@ -100,7 +100,7 @@ vint32m1_t test_vle32ff_v_i32m1(const int32_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vle32ff_v_i32m2(const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m2(base, new_vl, vl); + return __riscv_vle32ff_v_i32m2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4( @@ -112,7 +112,7 @@ vint32m2_t test_vle32ff_v_i32m2(const int32_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vle32ff_v_i32m4(const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m4(base, new_vl, vl); + return __riscv_vle32ff_v_i32m4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8( @@ -124,7 +124,7 @@ vint32m4_t test_vle32ff_v_i32m4(const int32_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vle32ff_v_i32m8(const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m8(base, new_vl, vl); + return __riscv_vle32ff_v_i32m8(base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vle32ff_v_u32mf2( @@ -136,7 +136,7 @@ vint32m8_t test_vle32ff_v_i32m8(const int32_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vle32ff_v_u32mf2(const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32mf2(base, new_vl, vl); + return __riscv_vle32ff_v_u32mf2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1( @@ -148,7 +148,7 @@ vuint32mf2_t test_vle32ff_v_u32mf2(const uint32_t *base, size_t *new_vl, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vle32ff_v_u32m1(const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m1(base, new_vl, vl); + return __riscv_vle32ff_v_u32m1(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m2( @@ -160,7 +160,7 @@ vuint32m1_t test_vle32ff_v_u32m1(const uint32_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vle32ff_v_u32m2(const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m2(base, new_vl, vl); + return __riscv_vle32ff_v_u32m2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m4( @@ -172,7 +172,7 @@ vuint32m2_t test_vle32ff_v_u32m2(const uint32_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vle32ff_v_u32m4(const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m4(base, new_vl, vl); + return __riscv_vle32ff_v_u32m4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m8( @@ -184,7 +184,7 @@ vuint32m4_t test_vle32ff_v_u32m4(const uint32_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vle32ff_v_u32m8(const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m8(base, new_vl, vl); + return __riscv_vle32ff_v_u32m8(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_m( @@ -196,7 +196,7 @@ vuint32m8_t test_vle32ff_v_u32m8(const uint32_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t 
test_vle32ff_v_f32mf2_m(vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32mf2_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_f32mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_m( @@ -208,7 +208,7 @@ vfloat32mf2_t test_vle32ff_v_f32mf2_m(vbool64_t mask, const float *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vle32ff_v_f32m1_m(vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m1_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_f32m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_m( @@ -220,7 +220,7 @@ vfloat32m1_t test_vle32ff_v_f32m1_m(vbool32_t mask, const float *base, size_t *n // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vle32ff_v_f32m2_m(vbool16_t mask, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m2_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_f32m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_m( @@ -232,7 +232,7 @@ vfloat32m2_t test_vle32ff_v_f32m2_m(vbool16_t mask, const float *base, size_t *n // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vle32ff_v_f32m4_m(vbool8_t mask, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m4_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_f32m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_m( @@ -244,7 +244,7 @@ vfloat32m4_t test_vle32ff_v_f32m4_m(vbool8_t mask, const float *base, size_t *ne // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vle32ff_v_f32m8_m(vbool4_t mask, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m8_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_f32m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_m( @@ -256,7 +256,7 @@ vfloat32m8_t test_vle32ff_v_f32m8_m(vbool4_t mask, const float *base, size_t *ne // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t 
test_vle32ff_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32mf2_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_i32mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_m( @@ -268,7 +268,7 @@ vint32mf2_t test_vle32ff_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vle32ff_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m1_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_i32m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_m( @@ -280,7 +280,7 @@ vint32m1_t test_vle32ff_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t *n // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vle32ff_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m2_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_i32m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_m( @@ -292,7 +292,7 @@ vint32m2_t test_vle32ff_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t *n // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vle32ff_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m4_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_i32m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_m( @@ -304,7 +304,7 @@ vint32m4_t test_vle32ff_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t *ne // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vle32ff_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m8_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_i32m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_m( @@ -316,7 +316,7 @@ vint32m8_t test_vle32ff_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t *ne // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t 
test_vle32ff_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32mf2_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_u32mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_m( @@ -328,7 +328,7 @@ vuint32mf2_t test_vle32ff_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vle32ff_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m1_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_u32m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_m( @@ -340,7 +340,7 @@ vuint32m1_t test_vle32ff_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vle32ff_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m2_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_u32m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_m( @@ -352,7 +352,7 @@ vuint32m2_t test_vle32ff_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vle32ff_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m4_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_u32m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_m( @@ -364,6 +364,6 @@ vuint32m4_t test_vle32ff_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t * // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vle32ff_v_u32m8_m(vbool4_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m8_m(mask, base, new_vl, vl); + return __riscv_vle32ff_v_u32m8_m(mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64.c index 
f3407ce89ea5..4c48eb4590c7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vle64_v_f64m1(const double *base, size_t vl) { - return vle64_v_f64m1(base, vl); + return __riscv_vle64_v_f64m1(base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m2( @@ -22,7 +22,7 @@ vfloat64m1_t test_vle64_v_f64m1(const double *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vle64_v_f64m2(const double *base, size_t vl) { - return vle64_v_f64m2(base, vl); + return __riscv_vle64_v_f64m2(base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m4( @@ -31,7 +31,7 @@ vfloat64m2_t test_vle64_v_f64m2(const double *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vle64_v_f64m4(const double *base, size_t vl) { - return vle64_v_f64m4(base, vl); + return __riscv_vle64_v_f64m4(base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m8( @@ -40,7 +40,7 @@ vfloat64m4_t test_vle64_v_f64m4(const double *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vle64_v_f64m8(const double *base, size_t vl) { - return vle64_v_f64m8(base, vl); + return __riscv_vle64_v_f64m8(base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m1( @@ -49,7 +49,7 @@ vfloat64m8_t test_vle64_v_f64m8(const double *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vle64_v_i64m1(const int64_t *base, size_t vl) { - return vle64_v_i64m1(base, vl); + return __riscv_vle64_v_i64m1(base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m2( @@ -58,7 +58,7 @@ vint64m1_t test_vle64_v_i64m1(const int64_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vle64_v_i64m2(const int64_t *base, size_t vl) { - return vle64_v_i64m2(base, vl); + return __riscv_vle64_v_i64m2(base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m4( @@ -67,7 +67,7 @@ 
vint64m2_t test_vle64_v_i64m2(const int64_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vle64_v_i64m4(const int64_t *base, size_t vl) { - return vle64_v_i64m4(base, vl); + return __riscv_vle64_v_i64m4(base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m8( @@ -76,7 +76,7 @@ vint64m4_t test_vle64_v_i64m4(const int64_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vle64_v_i64m8(const int64_t *base, size_t vl) { - return vle64_v_i64m8(base, vl); + return __riscv_vle64_v_i64m8(base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m1( @@ -85,7 +85,7 @@ vint64m8_t test_vle64_v_i64m8(const int64_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vle64_v_u64m1(const uint64_t *base, size_t vl) { - return vle64_v_u64m1(base, vl); + return __riscv_vle64_v_u64m1(base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m2( @@ -94,7 +94,7 @@ vuint64m1_t test_vle64_v_u64m1(const uint64_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vle64_v_u64m2(const uint64_t *base, size_t vl) { - return vle64_v_u64m2(base, vl); + return __riscv_vle64_v_u64m2(base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m4( @@ -103,7 +103,7 @@ vuint64m2_t test_vle64_v_u64m2(const uint64_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vle64_v_u64m4(const uint64_t *base, size_t vl) { - return vle64_v_u64m4(base, vl); + return __riscv_vle64_v_u64m4(base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m8( @@ -112,7 +112,7 @@ vuint64m4_t test_vle64_v_u64m4(const uint64_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vle64_v_u64m8(const uint64_t *base, size_t vl) { - return vle64_v_u64m8(base, vl); + return __riscv_vle64_v_u64m8(base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m1_m( @@ -121,7 +121,7 @@ vuint64m8_t test_vle64_v_u64m8(const uint64_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, const double 
*base, size_t vl) { - return vle64_v_f64m1_m(mask, base, vl); + return __riscv_vle64_v_f64m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m2_m( @@ -130,7 +130,7 @@ vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, const double *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, const double *base, size_t vl) { - return vle64_v_f64m2_m(mask, base, vl); + return __riscv_vle64_v_f64m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m4_m( @@ -139,7 +139,7 @@ vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, const double *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, const double *base, size_t vl) { - return vle64_v_f64m4_m(mask, base, vl); + return __riscv_vle64_v_f64m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m8_m( @@ -148,7 +148,7 @@ vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, const double *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, const double *base, size_t vl) { - return vle64_v_f64m8_m(mask, base, vl); + return __riscv_vle64_v_f64m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m1_m( @@ -157,7 +157,7 @@ vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, const double *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t vl) { - return vle64_v_i64m1_m(mask, base, vl); + return __riscv_vle64_v_i64m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m2_m( @@ -166,7 +166,7 @@ vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t vl) { - return vle64_v_i64m2_m(mask, base, vl); + return __riscv_vle64_v_i64m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m4_m( @@ -175,7 +175,7 @@ vint64m2_t 
test_vle64_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t vl) { - return vle64_v_i64m4_m(mask, base, vl); + return __riscv_vle64_v_i64m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m8_m( @@ -184,7 +184,7 @@ vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t vl) { - return vle64_v_i64m8_m(mask, base, vl); + return __riscv_vle64_v_i64m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m1_m( @@ -193,7 +193,7 @@ vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t vl) { - return vle64_v_u64m1_m(mask, base, vl); + return __riscv_vle64_v_u64m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m2_m( @@ -202,7 +202,7 @@ vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t vl) { - return vle64_v_u64m2_m(mask, base, vl); + return __riscv_vle64_v_u64m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m4_m( @@ -211,7 +211,7 @@ vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t vl) { - return vle64_v_u64m4_m(mask, base, vl); + return __riscv_vle64_v_u64m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m8_m( @@ -220,6 +220,6 @@ vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, const uint64_t *base, size_t vl) { - return 
vle64_v_u64m8_m(mask, base, vl); + return __riscv_vle64_v_u64m8_m(mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64ff.c index 1720535ae37d..e426b7177a08 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle64ff.c @@ -16,7 +16,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vle64ff_v_f64m1(const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m1(base, new_vl, vl); + return __riscv_vle64ff_v_f64m1(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2( @@ -28,7 +28,7 @@ vfloat64m1_t test_vle64ff_v_f64m1(const double *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vle64ff_v_f64m2(const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m2(base, new_vl, vl); + return __riscv_vle64ff_v_f64m2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m4( @@ -40,7 +40,7 @@ vfloat64m2_t test_vle64ff_v_f64m2(const double *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vle64ff_v_f64m4(const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m4(base, new_vl, vl); + return __riscv_vle64ff_v_f64m4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8( @@ -52,7 +52,7 @@ vfloat64m4_t test_vle64ff_v_f64m4(const double *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vle64ff_v_f64m8(const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m8(base, new_vl, vl); + return __riscv_vle64ff_v_f64m8(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1( @@ -64,7 +64,7 @@ vfloat64m8_t test_vle64ff_v_f64m8(const double *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret 
[[TMP1]] // vint64m1_t test_vle64ff_v_i64m1(const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m1(base, new_vl, vl); + return __riscv_vle64ff_v_i64m1(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m2( @@ -76,7 +76,7 @@ vint64m1_t test_vle64ff_v_i64m1(const int64_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vle64ff_v_i64m2(const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m2(base, new_vl, vl); + return __riscv_vle64ff_v_i64m2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4( @@ -88,7 +88,7 @@ vint64m2_t test_vle64ff_v_i64m2(const int64_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vle64ff_v_i64m4(const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m4(base, new_vl, vl); + return __riscv_vle64ff_v_i64m4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8( @@ -100,7 +100,7 @@ vint64m4_t test_vle64ff_v_i64m4(const int64_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vle64ff_v_i64m8(const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m8(base, new_vl, vl); + return __riscv_vle64ff_v_i64m8(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1( @@ -112,7 +112,7 @@ vint64m8_t test_vle64ff_v_i64m8(const int64_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vle64ff_v_u64m1(const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m1(base, new_vl, vl); + return __riscv_vle64ff_v_u64m1(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2( @@ -124,7 +124,7 @@ vuint64m1_t test_vle64ff_v_u64m1(const uint64_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vle64ff_v_u64m2(const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m2(base, new_vl, vl); + return __riscv_vle64ff_v_u64m2(base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vle64ff_v_u64m4( @@ -136,7 +136,7 @@ vuint64m2_t test_vle64ff_v_u64m2(const uint64_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vle64ff_v_u64m4(const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m4(base, new_vl, vl); + return __riscv_vle64ff_v_u64m4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8( @@ -148,7 +148,7 @@ vuint64m4_t test_vle64ff_v_u64m4(const uint64_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vle64ff_v_u64m8(const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m8(base, new_vl, vl); + return __riscv_vle64ff_v_u64m8(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_m( @@ -160,7 +160,7 @@ vuint64m8_t test_vle64ff_v_u64m8(const uint64_t *base, size_t *new_vl, size_t vl // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vle64ff_v_f64m1_m(vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m1_m(mask, base, new_vl, vl); + return __riscv_vle64ff_v_f64m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_m( @@ -172,7 +172,7 @@ vfloat64m1_t test_vle64ff_v_f64m1_m(vbool64_t mask, const double *base, size_t * // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vle64ff_v_f64m2_m(vbool32_t mask, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m2_m(mask, base, new_vl, vl); + return __riscv_vle64ff_v_f64m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_m( @@ -184,7 +184,7 @@ vfloat64m2_t test_vle64ff_v_f64m2_m(vbool32_t mask, const double *base, size_t * // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vle64ff_v_f64m4_m(vbool16_t mask, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m4_m(mask, base, new_vl, vl); + return __riscv_vle64ff_v_f64m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_m( @@ -196,7 +196,7 @@ vfloat64m4_t 
test_vle64ff_v_f64m4_m(vbool16_t mask, const double *base, size_t * // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vle64ff_v_f64m8_m(vbool8_t mask, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m8_m(mask, base, new_vl, vl); + return __riscv_vle64ff_v_f64m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_m( @@ -208,7 +208,7 @@ vfloat64m8_t test_vle64ff_v_f64m8_m(vbool8_t mask, const double *base, size_t *n // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vle64ff_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m1_m(mask, base, new_vl, vl); + return __riscv_vle64ff_v_i64m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_m( @@ -220,7 +220,7 @@ vint64m1_t test_vle64ff_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t *n // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vle64ff_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m2_m(mask, base, new_vl, vl); + return __riscv_vle64ff_v_i64m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_m( @@ -232,7 +232,7 @@ vint64m2_t test_vle64ff_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t *n // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vle64ff_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m4_m(mask, base, new_vl, vl); + return __riscv_vle64ff_v_i64m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_m( @@ -244,7 +244,7 @@ vint64m4_t test_vle64ff_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t *n // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vle64ff_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m8_m(mask, base, new_vl, vl); + return __riscv_vle64ff_v_i64m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_m( @@ -256,7 +256,7 @@ vint64m8_t 
test_vle64ff_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t *ne // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vle64ff_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m1_m(mask, base, new_vl, vl); + return __riscv_vle64ff_v_u64m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_m( @@ -268,7 +268,7 @@ vuint64m1_t test_vle64ff_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vle64ff_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m2_m(mask, base, new_vl, vl); + return __riscv_vle64ff_v_u64m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_m( @@ -280,7 +280,7 @@ vuint64m2_t test_vle64ff_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vle64ff_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m4_m(mask, base, new_vl, vl); + return __riscv_vle64ff_v_u64m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_m( @@ -292,6 +292,6 @@ vuint64m4_t test_vle64ff_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vle64ff_v_u64m8_m(vbool8_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m8_m(mask, base, new_vl, vl); + return __riscv_vle64ff_v_u64m8_m(mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8.c index 9896542d185a..96671d946dce 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vle8_v_i8mf8(const int8_t *base, size_t vl) { - return vle8_v_i8mf8(base, vl); + return __riscv_vle8_v_i8mf8(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf4( @@ -22,7 +22,7 @@ vint8mf8_t test_vle8_v_i8mf8(const int8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vle8_v_i8mf4(const int8_t *base, size_t vl) { - return vle8_v_i8mf4(base, vl); + return __riscv_vle8_v_i8mf4(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf2( @@ -31,7 +31,7 @@ vint8mf4_t test_vle8_v_i8mf4(const int8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vle8_v_i8mf2(const int8_t *base, size_t vl) { - return vle8_v_i8mf2(base, vl); + return __riscv_vle8_v_i8mf2(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m1( @@ -40,7 +40,7 @@ vint8mf2_t test_vle8_v_i8mf2(const int8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vle8_v_i8m1(const int8_t *base, size_t vl) { - return vle8_v_i8m1(base, vl); + return __riscv_vle8_v_i8m1(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m2( @@ -49,7 +49,7 @@ vint8m1_t test_vle8_v_i8m1(const int8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vle8_v_i8m2(const int8_t *base, size_t vl) { - return vle8_v_i8m2(base, vl); + return __riscv_vle8_v_i8m2(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m4( @@ -58,7 +58,7 @@ vint8m2_t test_vle8_v_i8m2(const int8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vle8_v_i8m4(const int8_t *base, size_t vl) { - return vle8_v_i8m4(base, vl); + return __riscv_vle8_v_i8m4(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m8( @@ -67,7 +67,7 @@ vint8m4_t test_vle8_v_i8m4(const int8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vle8_v_i8m8(const int8_t *base, size_t vl) { - return vle8_v_i8m8(base, vl); + return __riscv_vle8_v_i8m8(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf8( @@ -76,7 +76,7 @@ vint8m8_t test_vle8_v_i8m8(const int8_t *base, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vle8_v_u8mf8(const uint8_t *base, size_t vl) { - return vle8_v_u8mf8(base, vl); + return __riscv_vle8_v_u8mf8(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf4( @@ -85,7 +85,7 @@ vuint8mf8_t test_vle8_v_u8mf8(const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vle8_v_u8mf4(const uint8_t *base, size_t vl) { - return vle8_v_u8mf4(base, vl); + return __riscv_vle8_v_u8mf4(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf2( @@ -94,7 +94,7 @@ vuint8mf4_t test_vle8_v_u8mf4(const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vle8_v_u8mf2(const uint8_t *base, size_t vl) { - return vle8_v_u8mf2(base, vl); + return __riscv_vle8_v_u8mf2(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m1( @@ -103,7 +103,7 @@ vuint8mf2_t test_vle8_v_u8mf2(const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vle8_v_u8m1(const uint8_t *base, size_t vl) { - return vle8_v_u8m1(base, vl); + return __riscv_vle8_v_u8m1(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m2( @@ -112,7 +112,7 @@ vuint8m1_t test_vle8_v_u8m1(const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vle8_v_u8m2(const uint8_t *base, size_t vl) { - return vle8_v_u8m2(base, vl); + return __riscv_vle8_v_u8m2(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m4( @@ -121,7 +121,7 @@ vuint8m2_t test_vle8_v_u8m2(const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vle8_v_u8m4(const uint8_t *base, size_t vl) { - return vle8_v_u8m4(base, vl); + return __riscv_vle8_v_u8m4(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m8( @@ -130,7 +130,7 @@ vuint8m4_t test_vle8_v_u8m4(const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vle8_v_u8m8(const uint8_t *base, size_t vl) { - return vle8_v_u8m8(base, vl); + return __riscv_vle8_v_u8m8(base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf8_m( @@ 
-139,7 +139,7 @@ vuint8m8_t test_vle8_v_u8m8(const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t vl) { - return vle8_v_i8mf8_m(mask, base, vl); + return __riscv_vle8_v_i8mf8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m( @@ -148,7 +148,7 @@ vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t vl) { - return vle8_v_i8mf4_m(mask, base, vl); + return __riscv_vle8_v_i8mf4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m( @@ -157,7 +157,7 @@ vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t vl) { - return vle8_v_i8mf2_m(mask, base, vl); + return __riscv_vle8_v_i8mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m1_m( @@ -166,7 +166,7 @@ vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t vl) { - return vle8_v_i8m1_m(mask, base, vl); + return __riscv_vle8_v_i8m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m2_m( @@ -175,7 +175,7 @@ vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t vl) { - return vle8_v_i8m2_m(mask, base, vl); + return __riscv_vle8_v_i8m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m4_m( @@ -184,7 +184,7 @@ vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t vl) { - return vle8_v_i8m4_m(mask, base, vl); + return 
__riscv_vle8_v_i8m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m8_m( @@ -193,7 +193,7 @@ vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t vl) { - return vle8_v_i8m8_m(mask, base, vl); + return __riscv_vle8_v_i8m8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m( @@ -202,7 +202,7 @@ vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t vl) { - return vle8_v_u8mf8_m(mask, base, vl); + return __riscv_vle8_v_u8mf8_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m( @@ -211,7 +211,7 @@ vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t vl) { - return vle8_v_u8mf4_m(mask, base, vl); + return __riscv_vle8_v_u8mf4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m( @@ -220,7 +220,7 @@ vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t vl) { - return vle8_v_u8mf2_m(mask, base, vl); + return __riscv_vle8_v_u8mf2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m1_m( @@ -229,7 +229,7 @@ vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t vl) { - return vle8_v_u8m1_m(mask, base, vl); + return __riscv_vle8_v_u8m1_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m2_m( @@ -238,7 +238,7 @@ vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vle8_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t vl) { - return vle8_v_u8m2_m(mask, base, vl); + return __riscv_vle8_v_u8m2_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m4_m( @@ -247,7 +247,7 @@ vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t vl) { - return vle8_v_u8m4_m(mask, base, vl); + return __riscv_vle8_v_u8m4_m(mask, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m8_m( @@ -256,6 +256,6 @@ vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, const uint8_t *base, size_t vl) { - return vle8_v_u8m8_m(mask, base, vl); + return __riscv_vle8_v_u8m8_m(mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8ff.c index 0704a8bfceaf..2ef54cc392d0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vle8ff.c @@ -16,7 +16,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vle8ff_v_i8mf8(const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf8(base, new_vl, vl); + return __riscv_vle8ff_v_i8mf8(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4( @@ -28,7 +28,7 @@ vint8mf8_t test_vle8ff_v_i8mf8(const int8_t *base, size_t *new_vl, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vle8ff_v_i8mf4(const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf4(base, new_vl, vl); + return __riscv_vle8ff_v_i8mf4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2( @@ -40,7 +40,7 @@ vint8mf4_t test_vle8ff_v_i8mf4(const int8_t *base, size_t *new_vl, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vle8ff_v_i8mf2(const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf2(base, new_vl, vl); + return __riscv_vle8ff_v_i8mf2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1( @@ -52,7 +52,7 @@ vint8mf2_t test_vle8ff_v_i8mf2(const int8_t *base, size_t *new_vl, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vle8ff_v_i8m1(const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m1(base, new_vl, vl); + return __riscv_vle8ff_v_i8m1(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2( @@ -64,7 +64,7 @@ vint8m1_t test_vle8ff_v_i8m1(const int8_t *base, size_t *new_vl, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vle8ff_v_i8m2(const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m2(base, new_vl, vl); + return __riscv_vle8ff_v_i8m2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4( @@ -76,7 +76,7 @@ vint8m2_t test_vle8ff_v_i8m2(const int8_t *base, size_t *new_vl, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vle8ff_v_i8m4(const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m4(base, new_vl, vl); + return __riscv_vle8ff_v_i8m4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8( @@ -88,7 +88,7 @@ vint8m4_t test_vle8ff_v_i8m4(const int8_t *base, size_t *new_vl, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vle8ff_v_i8m8(const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m8(base, new_vl, vl); + return __riscv_vle8ff_v_i8m8(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8( @@ -100,7 +100,7 @@ vint8m8_t test_vle8ff_v_i8m8(const int8_t *base, size_t *new_vl, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vle8ff_v_u8mf8(const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf8(base, new_vl, vl); + return __riscv_vle8ff_v_u8mf8(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4( @@ 
-112,7 +112,7 @@ vuint8mf8_t test_vle8ff_v_u8mf8(const uint8_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vle8ff_v_u8mf4(const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf4(base, new_vl, vl); + return __riscv_vle8ff_v_u8mf4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2( @@ -124,7 +124,7 @@ vuint8mf4_t test_vle8ff_v_u8mf4(const uint8_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vle8ff_v_u8mf2(const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf2(base, new_vl, vl); + return __riscv_vle8ff_v_u8mf2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1( @@ -136,7 +136,7 @@ vuint8mf2_t test_vle8ff_v_u8mf2(const uint8_t *base, size_t *new_vl, size_t vl) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vle8ff_v_u8m1(const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m1(base, new_vl, vl); + return __riscv_vle8ff_v_u8m1(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2( @@ -148,7 +148,7 @@ vuint8m1_t test_vle8ff_v_u8m1(const uint8_t *base, size_t *new_vl, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vle8ff_v_u8m2(const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m2(base, new_vl, vl); + return __riscv_vle8ff_v_u8m2(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4( @@ -160,7 +160,7 @@ vuint8m2_t test_vle8ff_v_u8m2(const uint8_t *base, size_t *new_vl, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vle8ff_v_u8m4(const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m4(base, new_vl, vl); + return __riscv_vle8ff_v_u8m4(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8( @@ -172,7 +172,7 @@ vuint8m4_t test_vle8ff_v_u8m4(const uint8_t *base, size_t *new_vl, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vle8ff_v_u8m8(const uint8_t *base, size_t *new_vl, size_t vl) { - return 
vle8ff_v_u8m8(base, new_vl, vl); + return __riscv_vle8ff_v_u8m8(base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_m( @@ -184,7 +184,7 @@ vuint8m8_t test_vle8ff_v_u8m8(const uint8_t *base, size_t *new_vl, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vle8ff_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf8_m(mask, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_m( @@ -196,7 +196,7 @@ vint8mf8_t test_vle8ff_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t *new // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vle8ff_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf4_m(mask, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_m( @@ -208,7 +208,7 @@ vint8mf4_t test_vle8ff_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t *new // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vle8ff_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf2_m(mask, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_m( @@ -220,7 +220,7 @@ vint8mf2_t test_vle8ff_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t *new // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vle8ff_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m1_m(mask, base, new_vl, vl); + return __riscv_vle8ff_v_i8m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_m( @@ -232,7 +232,7 @@ vint8m1_t test_vle8ff_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t *new_vl // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vle8ff_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m2_m(mask, base, new_vl, vl); + return 
__riscv_vle8ff_v_i8m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_m( @@ -244,7 +244,7 @@ vint8m2_t test_vle8ff_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t *new_vl // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vle8ff_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m4_m(mask, base, new_vl, vl); + return __riscv_vle8ff_v_i8m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_m( @@ -256,7 +256,7 @@ vint8m4_t test_vle8ff_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t *new_vl // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vle8ff_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m8_m(mask, base, new_vl, vl); + return __riscv_vle8ff_v_i8m8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_m( @@ -268,7 +268,7 @@ vint8m8_t test_vle8ff_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t *new_vl // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vle8ff_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf8_m(mask, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf8_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_m( @@ -280,7 +280,7 @@ vuint8mf8_t test_vle8ff_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t *n // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vle8ff_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf4_m(mask, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_m( @@ -292,7 +292,7 @@ vuint8mf4_t test_vle8ff_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t *n // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vle8ff_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf2_m(mask, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf2_m(mask, base, new_vl, 
vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_m( @@ -304,7 +304,7 @@ vuint8mf2_t test_vle8ff_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t *n // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vle8ff_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m1_m(mask, base, new_vl, vl); + return __riscv_vle8ff_v_u8m1_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_m( @@ -316,7 +316,7 @@ vuint8m1_t test_vle8ff_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t *new_ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vle8ff_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m2_m(mask, base, new_vl, vl); + return __riscv_vle8ff_v_u8m2_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_m( @@ -328,7 +328,7 @@ vuint8m2_t test_vle8ff_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t *new_ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vle8ff_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m4_m(mask, base, new_vl, vl); + return __riscv_vle8ff_v_u8m4_m(mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_m( @@ -340,6 +340,6 @@ vuint8m4_t test_vle8ff_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t *new_ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vle8ff_v_u8m8_m(vbool1_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m8_m(mask, base, new_vl, vl); + return __riscv_vle8ff_v_u8m8_m(mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlm.c index c82124613166..f3015ae64b7c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlm.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlm.c @@ -12,7 +12,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vlm_v_b1(const uint8_t *base, size_t vl) { - return vlm_v_b1(base, vl); + return __riscv_vlm_v_b1(base, vl); } // CHECK-RV64-LABEL: @test_vlm_v_b2( @@ -21,7 +21,7 @@ vbool1_t test_vlm_v_b1(const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vlm_v_b2(const uint8_t *base, size_t vl) { - return vlm_v_b2(base, vl); + return __riscv_vlm_v_b2(base, vl); } // CHECK-RV64-LABEL: @test_vlm_v_b4( @@ -30,7 +30,7 @@ vbool2_t test_vlm_v_b2(const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vlm_v_b4(const uint8_t *base, size_t vl) { - return vlm_v_b4(base, vl); + return __riscv_vlm_v_b4(base, vl); } // CHECK-RV64-LABEL: @test_vlm_v_b8( @@ -39,7 +39,7 @@ vbool4_t test_vlm_v_b4(const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vlm_v_b8(const uint8_t *base, size_t vl) { - return vlm_v_b8(base, vl); + return __riscv_vlm_v_b8(base, vl); } // CHECK-RV64-LABEL: @test_vlm_v_b16( @@ -48,7 +48,7 @@ vbool8_t test_vlm_v_b8(const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vlm_v_b16(const uint8_t *base, size_t vl) { - return vlm_v_b16(base, vl); + return __riscv_vlm_v_b16(base, vl); } // CHECK-RV64-LABEL: @test_vlm_v_b32( @@ -57,7 +57,7 @@ vbool16_t test_vlm_v_b16(const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vlm_v_b32(const uint8_t *base, size_t vl) { - return vlm_v_b32(base, vl); + return __riscv_vlm_v_b32(base, vl); } // CHECK-RV64-LABEL: @test_vlm_v_b64( @@ -66,6 +66,6 @@ vbool32_t test_vlm_v_b32(const uint8_t *base, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vlm_v_b64(const uint8_t *base, size_t vl) { - return vlm_v_b64(base, vl); + return __riscv_vlm_v_b64(base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlmul.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlmul.c index 85580291d982..2f9713aab4a5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlmul.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2(vfloat16mf4_t op1) { - return vlmul_ext_v_f16mf4_f16mf2(op1); + return __riscv_vlmul_ext_v_f16mf4_f16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m1( @@ -22,7 +22,7 @@ vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2(vfloat16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1(vfloat16mf4_t op1) { - return vlmul_ext_v_f16mf4_f16m1(op1); + return __riscv_vlmul_ext_v_f16mf4_f16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m2( @@ -31,7 +31,7 @@ vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1(vfloat16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2(vfloat16mf4_t op1) { - return vlmul_ext_v_f16mf4_f16m2(op1); + return __riscv_vlmul_ext_v_f16mf4_f16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m4( @@ -40,7 +40,7 @@ vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2(vfloat16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4(vfloat16mf4_t op1) { - return vlmul_ext_v_f16mf4_f16m4(op1); + return __riscv_vlmul_ext_v_f16mf4_f16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m8( @@ -49,7 +49,7 @@ vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4(vfloat16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8(vfloat16mf4_t op1) { - return vlmul_ext_v_f16mf4_f16m8(op1); + return __riscv_vlmul_ext_v_f16mf4_f16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m1( @@ -58,7 +58,7 @@ vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8(vfloat16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1(vfloat16mf2_t op1) { - return vlmul_ext_v_f16mf2_f16m1(op1); + return __riscv_vlmul_ext_v_f16mf2_f16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1(vfloat16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2(vfloat16mf2_t op1) { - return vlmul_ext_v_f16mf2_f16m2(op1); + return __riscv_vlmul_ext_v_f16mf2_f16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m4( @@ -76,7 +76,7 @@ vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2(vfloat16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4(vfloat16mf2_t op1) { - return vlmul_ext_v_f16mf2_f16m4(op1); + return __riscv_vlmul_ext_v_f16mf2_f16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m8( @@ -85,7 +85,7 @@ vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4(vfloat16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8(vfloat16mf2_t op1) { - return vlmul_ext_v_f16mf2_f16m8(op1); + return __riscv_vlmul_ext_v_f16mf2_f16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m2( @@ -94,7 +94,7 @@ vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8(vfloat16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2(vfloat16m1_t op1) { - return vlmul_ext_v_f16m1_f16m2(op1); + return __riscv_vlmul_ext_v_f16m1_f16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m4( @@ -103,7 +103,7 @@ vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2(vfloat16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4(vfloat16m1_t op1) { - return vlmul_ext_v_f16m1_f16m4(op1); + return __riscv_vlmul_ext_v_f16m1_f16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m8( @@ -112,7 +112,7 @@ vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4(vfloat16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8(vfloat16m1_t 
op1) { - return vlmul_ext_v_f16m1_f16m8(op1); + return __riscv_vlmul_ext_v_f16m1_f16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m4( @@ -121,7 +121,7 @@ vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8(vfloat16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4(vfloat16m2_t op1) { - return vlmul_ext_v_f16m2_f16m4(op1); + return __riscv_vlmul_ext_v_f16m2_f16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m8( @@ -130,7 +130,7 @@ vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4(vfloat16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8(vfloat16m2_t op1) { - return vlmul_ext_v_f16m2_f16m8(op1); + return __riscv_vlmul_ext_v_f16m2_f16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m4_f16m8( @@ -139,7 +139,7 @@ vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8(vfloat16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8(vfloat16m4_t op1) { - return vlmul_ext_v_f16m4_f16m8(op1); + return __riscv_vlmul_ext_v_f16m4_f16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1( @@ -148,7 +148,7 @@ vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8(vfloat16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) { - return vlmul_ext_v_f32mf2_f32m1(op1); + return __riscv_vlmul_ext_v_f32mf2_f32m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) { - return vlmul_ext_v_f32mf2_f32m2(op1); + return __riscv_vlmul_ext_v_f32mf2_f32m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m4( @@ -166,7 +166,7 @@ vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) { - return vlmul_ext_v_f32mf2_f32m4(op1); + return 
__riscv_vlmul_ext_v_f32mf2_f32m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m8( @@ -175,7 +175,7 @@ vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) { - return vlmul_ext_v_f32mf2_f32m8(op1); + return __riscv_vlmul_ext_v_f32mf2_f32m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m2( @@ -184,7 +184,7 @@ vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) { - return vlmul_ext_v_f32m1_f32m2(op1); + return __riscv_vlmul_ext_v_f32m1_f32m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m4( @@ -193,7 +193,7 @@ vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) { - return vlmul_ext_v_f32m1_f32m4(op1); + return __riscv_vlmul_ext_v_f32m1_f32m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m8( @@ -202,7 +202,7 @@ vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) { - return vlmul_ext_v_f32m1_f32m8(op1); + return __riscv_vlmul_ext_v_f32m1_f32m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m4( @@ -211,7 +211,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) { - return vlmul_ext_v_f32m2_f32m4(op1); + return __riscv_vlmul_ext_v_f32m2_f32m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m8( @@ -220,7 +220,7 @@ vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) { - return vlmul_ext_v_f32m2_f32m8(op1); + return __riscv_vlmul_ext_v_f32m2_f32m8(op1); } // CHECK-RV64-LABEL: 
@test_vlmul_ext_v_f32m4_f32m8( @@ -229,7 +229,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) { - return vlmul_ext_v_f32m4_f32m8(op1); + return __riscv_vlmul_ext_v_f32m4_f32m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m2( @@ -238,7 +238,7 @@ vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) { - return vlmul_ext_v_f64m1_f64m2(op1); + return __riscv_vlmul_ext_v_f64m1_f64m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) { - return vlmul_ext_v_f64m1_f64m4(op1); + return __riscv_vlmul_ext_v_f64m1_f64m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m8( @@ -256,7 +256,7 @@ vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) { - return vlmul_ext_v_f64m1_f64m8(op1); + return __riscv_vlmul_ext_v_f64m1_f64m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m4( @@ -265,7 +265,7 @@ vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) { - return vlmul_ext_v_f64m2_f64m4(op1); + return __riscv_vlmul_ext_v_f64m2_f64m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m8( @@ -274,7 +274,7 @@ vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) { - return vlmul_ext_v_f64m2_f64m8(op1); + return __riscv_vlmul_ext_v_f64m2_f64m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m4_f64m8( @@ -283,7 +283,7 @@ vfloat64m8_t 
test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) { - return vlmul_ext_v_f64m4_f64m8(op1); + return __riscv_vlmul_ext_v_f64m4_f64m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4( @@ -292,7 +292,7 @@ vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) { - return vlmul_ext_v_i8mf8_i8mf4(op1); + return __riscv_vlmul_ext_v_i8mf8_i8mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2( @@ -301,7 +301,7 @@ vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) { - return vlmul_ext_v_i8mf8_i8mf2(op1); + return __riscv_vlmul_ext_v_i8mf8_i8mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m1( @@ -310,7 +310,7 @@ vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) { - return vlmul_ext_v_i8mf8_i8m1(op1); + return __riscv_vlmul_ext_v_i8mf8_i8m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m2( @@ -319,7 +319,7 @@ vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) { - return vlmul_ext_v_i8mf8_i8m2(op1); + return __riscv_vlmul_ext_v_i8mf8_i8m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m4( @@ -328,7 +328,7 @@ vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) { - return vlmul_ext_v_i8mf8_i8m4(op1); + return __riscv_vlmul_ext_v_i8mf8_i8m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m8( @@ -337,7 +337,7 @@ vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) { 
- return vlmul_ext_v_i8mf8_i8m8(op1); + return __riscv_vlmul_ext_v_i8mf8_i8m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2( @@ -346,7 +346,7 @@ vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) { - return vlmul_ext_v_i8mf4_i8mf2(op1); + return __riscv_vlmul_ext_v_i8mf4_i8mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m1( @@ -355,7 +355,7 @@ vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) { - return vlmul_ext_v_i8mf4_i8m1(op1); + return __riscv_vlmul_ext_v_i8mf4_i8m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m2( @@ -364,7 +364,7 @@ vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) { - return vlmul_ext_v_i8mf4_i8m2(op1); + return __riscv_vlmul_ext_v_i8mf4_i8m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m4( @@ -373,7 +373,7 @@ vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) { - return vlmul_ext_v_i8mf4_i8m4(op1); + return __riscv_vlmul_ext_v_i8mf4_i8m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m8( @@ -382,7 +382,7 @@ vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) { - return vlmul_ext_v_i8mf4_i8m8(op1); + return __riscv_vlmul_ext_v_i8mf4_i8m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m1( @@ -391,7 +391,7 @@ vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) { - return vlmul_ext_v_i8mf2_i8m1(op1); + return __riscv_vlmul_ext_v_i8mf2_i8m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m2( @@ -400,7 +400,7 @@ 
vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) { - return vlmul_ext_v_i8mf2_i8m2(op1); + return __riscv_vlmul_ext_v_i8mf2_i8m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m4( @@ -409,7 +409,7 @@ vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) { - return vlmul_ext_v_i8mf2_i8m4(op1); + return __riscv_vlmul_ext_v_i8mf2_i8m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m8( @@ -418,7 +418,7 @@ vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) { - return vlmul_ext_v_i8mf2_i8m8(op1); + return __riscv_vlmul_ext_v_i8mf2_i8m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m2( @@ -427,7 +427,7 @@ vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) { - return vlmul_ext_v_i8m1_i8m2(op1); + return __riscv_vlmul_ext_v_i8m1_i8m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m4( @@ -436,7 +436,7 @@ vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) { - return vlmul_ext_v_i8m1_i8m4(op1); + return __riscv_vlmul_ext_v_i8m1_i8m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m8( @@ -445,7 +445,7 @@ vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) { - return vlmul_ext_v_i8m1_i8m8(op1); + return __riscv_vlmul_ext_v_i8m1_i8m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m4( @@ -454,7 +454,7 @@ vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) { - return vlmul_ext_v_i8m2_i8m4(op1); + return 
__riscv_vlmul_ext_v_i8m2_i8m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m8( @@ -463,7 +463,7 @@ vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) { - return vlmul_ext_v_i8m2_i8m8(op1); + return __riscv_vlmul_ext_v_i8m2_i8m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m4_i8m8( @@ -472,7 +472,7 @@ vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) { - return vlmul_ext_v_i8m4_i8m8(op1); + return __riscv_vlmul_ext_v_i8m4_i8m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2( @@ -481,7 +481,7 @@ vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) { - return vlmul_ext_v_i16mf4_i16mf2(op1); + return __riscv_vlmul_ext_v_i16mf4_i16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m1( @@ -490,7 +490,7 @@ vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) { - return vlmul_ext_v_i16mf4_i16m1(op1); + return __riscv_vlmul_ext_v_i16mf4_i16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m2( @@ -499,7 +499,7 @@ vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) { - return vlmul_ext_v_i16mf4_i16m2(op1); + return __riscv_vlmul_ext_v_i16mf4_i16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m4( @@ -508,7 +508,7 @@ vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) { - return vlmul_ext_v_i16mf4_i16m4(op1); + return __riscv_vlmul_ext_v_i16mf4_i16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m8( @@ -517,7 +517,7 @@ vint16m4_t 
test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) { - return vlmul_ext_v_i16mf4_i16m8(op1); + return __riscv_vlmul_ext_v_i16mf4_i16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m1( @@ -526,7 +526,7 @@ vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) { - return vlmul_ext_v_i16mf2_i16m1(op1); + return __riscv_vlmul_ext_v_i16mf2_i16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m2( @@ -535,7 +535,7 @@ vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) { - return vlmul_ext_v_i16mf2_i16m2(op1); + return __riscv_vlmul_ext_v_i16mf2_i16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m4( @@ -544,7 +544,7 @@ vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) { - return vlmul_ext_v_i16mf2_i16m4(op1); + return __riscv_vlmul_ext_v_i16mf2_i16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m8( @@ -553,7 +553,7 @@ vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) { - return vlmul_ext_v_i16mf2_i16m8(op1); + return __riscv_vlmul_ext_v_i16mf2_i16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m2( @@ -562,7 +562,7 @@ vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) { - return vlmul_ext_v_i16m1_i16m2(op1); + return __riscv_vlmul_ext_v_i16m1_i16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m4( @@ -571,7 +571,7 @@ vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) { - return vlmul_ext_v_i16m1_i16m4(op1); + return __riscv_vlmul_ext_v_i16m1_i16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m8( @@ -580,7 +580,7 @@ vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) { - return vlmul_ext_v_i16m1_i16m8(op1); + return __riscv_vlmul_ext_v_i16m1_i16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m4( @@ -589,7 +589,7 @@ vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) { - return vlmul_ext_v_i16m2_i16m4(op1); + return __riscv_vlmul_ext_v_i16m2_i16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m8( @@ -598,7 +598,7 @@ vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) { - return vlmul_ext_v_i16m2_i16m8(op1); + return __riscv_vlmul_ext_v_i16m2_i16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m4_i16m8( @@ -607,7 +607,7 @@ vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) { - return vlmul_ext_v_i16m4_i16m8(op1); + return __riscv_vlmul_ext_v_i16m4_i16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m1( @@ -616,7 +616,7 @@ vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) { - return vlmul_ext_v_i32mf2_i32m1(op1); + return __riscv_vlmul_ext_v_i32mf2_i32m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m2( @@ -625,7 +625,7 @@ vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) { - return vlmul_ext_v_i32mf2_i32m2(op1); + return 
__riscv_vlmul_ext_v_i32mf2_i32m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m4( @@ -634,7 +634,7 @@ vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) { - return vlmul_ext_v_i32mf2_i32m4(op1); + return __riscv_vlmul_ext_v_i32mf2_i32m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m8( @@ -643,7 +643,7 @@ vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) { - return vlmul_ext_v_i32mf2_i32m8(op1); + return __riscv_vlmul_ext_v_i32mf2_i32m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m2( @@ -652,7 +652,7 @@ vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) { - return vlmul_ext_v_i32m1_i32m2(op1); + return __riscv_vlmul_ext_v_i32m1_i32m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m4( @@ -661,7 +661,7 @@ vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) { - return vlmul_ext_v_i32m1_i32m4(op1); + return __riscv_vlmul_ext_v_i32m1_i32m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m8( @@ -670,7 +670,7 @@ vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) { - return vlmul_ext_v_i32m1_i32m8(op1); + return __riscv_vlmul_ext_v_i32m1_i32m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m4( @@ -679,7 +679,7 @@ vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) { - return vlmul_ext_v_i32m2_i32m4(op1); + return __riscv_vlmul_ext_v_i32m2_i32m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m8( @@ -688,7 +688,7 
@@ vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) { - return vlmul_ext_v_i32m2_i32m8(op1); + return __riscv_vlmul_ext_v_i32m2_i32m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m4_i32m8( @@ -697,7 +697,7 @@ vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) { - return vlmul_ext_v_i32m4_i32m8(op1); + return __riscv_vlmul_ext_v_i32m4_i32m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m2( @@ -706,7 +706,7 @@ vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) { - return vlmul_ext_v_i64m1_i64m2(op1); + return __riscv_vlmul_ext_v_i64m1_i64m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m4( @@ -715,7 +715,7 @@ vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) { - return vlmul_ext_v_i64m1_i64m4(op1); + return __riscv_vlmul_ext_v_i64m1_i64m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m8( @@ -724,7 +724,7 @@ vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) { - return vlmul_ext_v_i64m1_i64m8(op1); + return __riscv_vlmul_ext_v_i64m1_i64m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m4( @@ -733,7 +733,7 @@ vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) { - return vlmul_ext_v_i64m2_i64m4(op1); + return __riscv_vlmul_ext_v_i64m2_i64m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m8( @@ -742,7 +742,7 @@ vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) { - return vlmul_ext_v_i64m2_i64m8(op1); + return __riscv_vlmul_ext_v_i64m2_i64m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m4_i64m8( @@ -751,7 +751,7 @@ vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) { - return vlmul_ext_v_i64m4_i64m8(op1); + return __riscv_vlmul_ext_v_i64m4_i64m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4( @@ -760,7 +760,7 @@ vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) { - return vlmul_ext_v_u8mf8_u8mf4(op1); + return __riscv_vlmul_ext_v_u8mf8_u8mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2( @@ -769,7 +769,7 @@ vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) { - return vlmul_ext_v_u8mf8_u8mf2(op1); + return __riscv_vlmul_ext_v_u8mf8_u8mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m1( @@ -778,7 +778,7 @@ vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) { - return vlmul_ext_v_u8mf8_u8m1(op1); + return __riscv_vlmul_ext_v_u8mf8_u8m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m2( @@ -787,7 +787,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) { - return vlmul_ext_v_u8mf8_u8m2(op1); + return __riscv_vlmul_ext_v_u8mf8_u8m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m4( @@ -796,7 +796,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) { - return vlmul_ext_v_u8mf8_u8m4(op1); + return 
__riscv_vlmul_ext_v_u8mf8_u8m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m8( @@ -805,7 +805,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) { - return vlmul_ext_v_u8mf8_u8m8(op1); + return __riscv_vlmul_ext_v_u8mf8_u8m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2( @@ -814,7 +814,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) { - return vlmul_ext_v_u8mf4_u8mf2(op1); + return __riscv_vlmul_ext_v_u8mf4_u8mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m1( @@ -823,7 +823,7 @@ vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) { - return vlmul_ext_v_u8mf4_u8m1(op1); + return __riscv_vlmul_ext_v_u8mf4_u8m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m2( @@ -832,7 +832,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) { - return vlmul_ext_v_u8mf4_u8m2(op1); + return __riscv_vlmul_ext_v_u8mf4_u8m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m4( @@ -841,7 +841,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) { - return vlmul_ext_v_u8mf4_u8m4(op1); + return __riscv_vlmul_ext_v_u8mf4_u8m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m8( @@ -850,7 +850,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) { - return vlmul_ext_v_u8mf4_u8m8(op1); + return __riscv_vlmul_ext_v_u8mf4_u8m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m1( @@ -859,7 +859,7 @@ vuint8m8_t 
test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) { - return vlmul_ext_v_u8mf2_u8m1(op1); + return __riscv_vlmul_ext_v_u8mf2_u8m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m2( @@ -868,7 +868,7 @@ vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) { - return vlmul_ext_v_u8mf2_u8m2(op1); + return __riscv_vlmul_ext_v_u8mf2_u8m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m4( @@ -877,7 +877,7 @@ vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) { - return vlmul_ext_v_u8mf2_u8m4(op1); + return __riscv_vlmul_ext_v_u8mf2_u8m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m8( @@ -886,7 +886,7 @@ vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) { - return vlmul_ext_v_u8mf2_u8m8(op1); + return __riscv_vlmul_ext_v_u8mf2_u8m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m2( @@ -895,7 +895,7 @@ vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) { - return vlmul_ext_v_u8m1_u8m2(op1); + return __riscv_vlmul_ext_v_u8m1_u8m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m4( @@ -904,7 +904,7 @@ vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) { - return vlmul_ext_v_u8m1_u8m4(op1); + return __riscv_vlmul_ext_v_u8m1_u8m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m8( @@ -913,7 +913,7 @@ vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) { - return 
vlmul_ext_v_u8m1_u8m8(op1); + return __riscv_vlmul_ext_v_u8m1_u8m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m4( @@ -922,7 +922,7 @@ vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) { - return vlmul_ext_v_u8m2_u8m4(op1); + return __riscv_vlmul_ext_v_u8m2_u8m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m8( @@ -931,7 +931,7 @@ vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) { - return vlmul_ext_v_u8m2_u8m8(op1); + return __riscv_vlmul_ext_v_u8m2_u8m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m4_u8m8( @@ -940,7 +940,7 @@ vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) { - return vlmul_ext_v_u8m4_u8m8(op1); + return __riscv_vlmul_ext_v_u8m4_u8m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16mf2( @@ -949,7 +949,7 @@ vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) { - return vlmul_ext_v_u16mf4_u16mf2(op1); + return __riscv_vlmul_ext_v_u16mf4_u16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m1( @@ -958,7 +958,7 @@ vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) { - return vlmul_ext_v_u16mf4_u16m1(op1); + return __riscv_vlmul_ext_v_u16mf4_u16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m2( @@ -967,7 +967,7 @@ vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) { - return vlmul_ext_v_u16mf4_u16m2(op1); + return __riscv_vlmul_ext_v_u16mf4_u16m2(op1); } // CHECK-RV64-LABEL: 
@test_vlmul_ext_v_u16mf4_u16m4( @@ -976,7 +976,7 @@ vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) { - return vlmul_ext_v_u16mf4_u16m4(op1); + return __riscv_vlmul_ext_v_u16mf4_u16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m8( @@ -985,7 +985,7 @@ vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) { - return vlmul_ext_v_u16mf4_u16m8(op1); + return __riscv_vlmul_ext_v_u16mf4_u16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m1( @@ -994,7 +994,7 @@ vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) { - return vlmul_ext_v_u16mf2_u16m1(op1); + return __riscv_vlmul_ext_v_u16mf2_u16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m2( @@ -1003,7 +1003,7 @@ vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) { - return vlmul_ext_v_u16mf2_u16m2(op1); + return __riscv_vlmul_ext_v_u16mf2_u16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m4( @@ -1012,7 +1012,7 @@ vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) { - return vlmul_ext_v_u16mf2_u16m4(op1); + return __riscv_vlmul_ext_v_u16mf2_u16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m8( @@ -1021,7 +1021,7 @@ vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) { - return vlmul_ext_v_u16mf2_u16m8(op1); + return __riscv_vlmul_ext_v_u16mf2_u16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m2( @@ -1030,7 +1030,7 @@ 
vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) { - return vlmul_ext_v_u16m1_u16m2(op1); + return __riscv_vlmul_ext_v_u16m1_u16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m4( @@ -1039,7 +1039,7 @@ vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) { - return vlmul_ext_v_u16m1_u16m4(op1); + return __riscv_vlmul_ext_v_u16m1_u16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m8( @@ -1048,7 +1048,7 @@ vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) { - return vlmul_ext_v_u16m1_u16m8(op1); + return __riscv_vlmul_ext_v_u16m1_u16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m4( @@ -1057,7 +1057,7 @@ vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) { - return vlmul_ext_v_u16m2_u16m4(op1); + return __riscv_vlmul_ext_v_u16m2_u16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m8( @@ -1066,7 +1066,7 @@ vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) { - return vlmul_ext_v_u16m2_u16m8(op1); + return __riscv_vlmul_ext_v_u16m2_u16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m4_u16m8( @@ -1075,7 +1075,7 @@ vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) { - return vlmul_ext_v_u16m4_u16m8(op1); + return __riscv_vlmul_ext_v_u16m4_u16m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m1( @@ -1084,7 +1084,7 @@ vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) { - return vlmul_ext_v_u32mf2_u32m1(op1); + return __riscv_vlmul_ext_v_u32mf2_u32m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m2( @@ -1093,7 +1093,7 @@ vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) { - return vlmul_ext_v_u32mf2_u32m2(op1); + return __riscv_vlmul_ext_v_u32mf2_u32m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m4( @@ -1102,7 +1102,7 @@ vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) { - return vlmul_ext_v_u32mf2_u32m4(op1); + return __riscv_vlmul_ext_v_u32mf2_u32m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m8( @@ -1111,7 +1111,7 @@ vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) { - return vlmul_ext_v_u32mf2_u32m8(op1); + return __riscv_vlmul_ext_v_u32mf2_u32m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m2( @@ -1120,7 +1120,7 @@ vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) { - return vlmul_ext_v_u32m1_u32m2(op1); + return __riscv_vlmul_ext_v_u32m1_u32m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m4( @@ -1129,7 +1129,7 @@ vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) { - return vlmul_ext_v_u32m1_u32m4(op1); + return __riscv_vlmul_ext_v_u32m1_u32m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m8( @@ -1138,7 +1138,7 @@ vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t 
op1) { - return vlmul_ext_v_u32m1_u32m8(op1); + return __riscv_vlmul_ext_v_u32m1_u32m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m4( @@ -1147,7 +1147,7 @@ vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) { - return vlmul_ext_v_u32m2_u32m4(op1); + return __riscv_vlmul_ext_v_u32m2_u32m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m8( @@ -1156,7 +1156,7 @@ vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) { - return vlmul_ext_v_u32m2_u32m8(op1); + return __riscv_vlmul_ext_v_u32m2_u32m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m4_u32m8( @@ -1165,7 +1165,7 @@ vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) { - return vlmul_ext_v_u32m4_u32m8(op1); + return __riscv_vlmul_ext_v_u32m4_u32m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m2( @@ -1174,7 +1174,7 @@ vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) { - return vlmul_ext_v_u64m1_u64m2(op1); + return __riscv_vlmul_ext_v_u64m1_u64m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m4( @@ -1183,7 +1183,7 @@ vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) { - return vlmul_ext_v_u64m1_u64m4(op1); + return __riscv_vlmul_ext_v_u64m1_u64m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m8( @@ -1192,7 +1192,7 @@ vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) { - return vlmul_ext_v_u64m1_u64m8(op1); + return 
__riscv_vlmul_ext_v_u64m1_u64m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m4( @@ -1201,7 +1201,7 @@ vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) { - return vlmul_ext_v_u64m2_u64m4(op1); + return __riscv_vlmul_ext_v_u64m2_u64m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m8( @@ -1210,7 +1210,7 @@ vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) { - return vlmul_ext_v_u64m2_u64m8(op1); + return __riscv_vlmul_ext_v_u64m2_u64m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m4_u64m8( @@ -1219,7 +1219,7 @@ vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) { - return vlmul_ext_v_u64m4_u64m8(op1); + return __riscv_vlmul_ext_v_u64m4_u64m8(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16mf2_f16mf4( @@ -1228,7 +1228,7 @@ vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t op1) { - return vlmul_trunc_v_f16mf2_f16mf4(op1); + return __riscv_vlmul_trunc_v_f16mf2_f16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf4( @@ -1237,7 +1237,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t op1) { - return vlmul_trunc_v_f16m1_f16mf4(op1); + return __riscv_vlmul_trunc_v_f16m1_f16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf2( @@ -1246,7 +1246,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t op1) { - return vlmul_trunc_v_f16m1_f16mf2(op1); + return 
__riscv_vlmul_trunc_v_f16m1_f16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf4( @@ -1255,7 +1255,7 @@ vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t op1) { - return vlmul_trunc_v_f16m2_f16mf4(op1); + return __riscv_vlmul_trunc_v_f16m2_f16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf2( @@ -1264,7 +1264,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t op1) { - return vlmul_trunc_v_f16m2_f16mf2(op1); + return __riscv_vlmul_trunc_v_f16m2_f16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16m1( @@ -1273,7 +1273,7 @@ vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t op1) { - return vlmul_trunc_v_f16m2_f16m1(op1); + return __riscv_vlmul_trunc_v_f16m2_f16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf4( @@ -1282,7 +1282,7 @@ vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t op1) { - return vlmul_trunc_v_f16m4_f16mf4(op1); + return __riscv_vlmul_trunc_v_f16m4_f16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf2( @@ -1291,7 +1291,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t op1) { - return vlmul_trunc_v_f16m4_f16mf2(op1); + return __riscv_vlmul_trunc_v_f16m4_f16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m1( @@ -1300,7 +1300,7 @@ vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t op1) { - return 
vlmul_trunc_v_f16m4_f16m1(op1); + return __riscv_vlmul_trunc_v_f16m4_f16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m2( @@ -1309,7 +1309,7 @@ vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t op1) { - return vlmul_trunc_v_f16m4_f16m2(op1); + return __riscv_vlmul_trunc_v_f16m4_f16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf4( @@ -1318,7 +1318,7 @@ vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t op1) { - return vlmul_trunc_v_f16m8_f16mf4(op1); + return __riscv_vlmul_trunc_v_f16m8_f16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf2( @@ -1327,7 +1327,7 @@ vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t op1) { - return vlmul_trunc_v_f16m8_f16mf2(op1); + return __riscv_vlmul_trunc_v_f16m8_f16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m1( @@ -1336,7 +1336,7 @@ vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t op1) { - return vlmul_trunc_v_f16m8_f16m1(op1); + return __riscv_vlmul_trunc_v_f16m8_f16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m2( @@ -1345,7 +1345,7 @@ vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t op1) { - return vlmul_trunc_v_f16m8_f16m2(op1); + return __riscv_vlmul_trunc_v_f16m8_f16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m4( @@ -1354,7 +1354,7 @@ vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t op1) { - 
return vlmul_trunc_v_f16m8_f16m4(op1); + return __riscv_vlmul_trunc_v_f16m8_f16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( @@ -1363,7 +1363,7 @@ vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { - return vlmul_trunc_v_f32m1_f32mf2(op1); + return __riscv_vlmul_trunc_v_f32m1_f32mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( @@ -1372,7 +1372,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { - return vlmul_trunc_v_f32m2_f32mf2(op1); + return __riscv_vlmul_trunc_v_f32m2_f32mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( @@ -1381,7 +1381,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { - return vlmul_trunc_v_f32m2_f32m1(op1); + return __riscv_vlmul_trunc_v_f32m2_f32m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( @@ -1390,7 +1390,7 @@ vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { - return vlmul_trunc_v_f32m4_f32mf2(op1); + return __riscv_vlmul_trunc_v_f32m4_f32mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( @@ -1399,7 +1399,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { - return vlmul_trunc_v_f32m4_f32m1(op1); + return __riscv_vlmul_trunc_v_f32m4_f32m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( @@ -1408,7 +1408,7 @@ vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t 
test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { - return vlmul_trunc_v_f32m4_f32m2(op1); + return __riscv_vlmul_trunc_v_f32m4_f32m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( @@ -1417,7 +1417,7 @@ vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { - return vlmul_trunc_v_f32m8_f32mf2(op1); + return __riscv_vlmul_trunc_v_f32m8_f32mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( @@ -1426,7 +1426,7 @@ vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { - return vlmul_trunc_v_f32m8_f32m1(op1); + return __riscv_vlmul_trunc_v_f32m8_f32m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( @@ -1435,7 +1435,7 @@ vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { - return vlmul_trunc_v_f32m8_f32m2(op1); + return __riscv_vlmul_trunc_v_f32m8_f32m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( @@ -1444,7 +1444,7 @@ vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { - return vlmul_trunc_v_f32m8_f32m4(op1); + return __riscv_vlmul_trunc_v_f32m8_f32m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( @@ -1453,7 +1453,7 @@ vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { - return vlmul_trunc_v_f64m2_f64m1(op1); + return __riscv_vlmul_trunc_v_f64m2_f64m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( @@ -1462,7 +1462,7 @@ vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { - return vlmul_trunc_v_f64m4_f64m1(op1); + return __riscv_vlmul_trunc_v_f64m4_f64m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( @@ -1471,7 +1471,7 @@ vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { - return vlmul_trunc_v_f64m4_f64m2(op1); + return __riscv_vlmul_trunc_v_f64m4_f64m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( @@ -1480,7 +1480,7 @@ vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { - return vlmul_trunc_v_f64m8_f64m1(op1); + return __riscv_vlmul_trunc_v_f64m8_f64m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( @@ -1489,7 +1489,7 @@ vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { - return vlmul_trunc_v_f64m8_f64m2(op1); + return __riscv_vlmul_trunc_v_f64m8_f64m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( @@ -1498,7 +1498,7 @@ vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { - return vlmul_trunc_v_f64m8_f64m4(op1); + return __riscv_vlmul_trunc_v_f64m8_f64m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8( @@ -1507,7 +1507,7 @@ vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) { - return vlmul_trunc_v_i8mf4_i8mf8(op1); + return __riscv_vlmul_trunc_v_i8mf4_i8mf8(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8( @@ -1516,7 +1516,7 @@ vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) { - return vlmul_trunc_v_i8mf2_i8mf8(op1); + return __riscv_vlmul_trunc_v_i8mf2_i8mf8(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4( @@ -1525,7 +1525,7 @@ vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) { - return vlmul_trunc_v_i8mf2_i8mf4(op1); + return __riscv_vlmul_trunc_v_i8mf2_i8mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8( @@ -1534,7 +1534,7 @@ vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) { - return vlmul_trunc_v_i8m1_i8mf8(op1); + return __riscv_vlmul_trunc_v_i8m1_i8mf8(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4( @@ -1543,7 +1543,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) { - return vlmul_trunc_v_i8m1_i8mf4(op1); + return __riscv_vlmul_trunc_v_i8m1_i8mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2( @@ -1552,7 +1552,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) { - return vlmul_trunc_v_i8m1_i8mf2(op1); + return __riscv_vlmul_trunc_v_i8m1_i8mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8( @@ -1561,7 +1561,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) { - return vlmul_trunc_v_i8m2_i8mf8(op1); + return __riscv_vlmul_trunc_v_i8m2_i8mf8(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4( @@ -1570,7 +1570,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) { - return 
vlmul_trunc_v_i8m2_i8mf4(op1); + return __riscv_vlmul_trunc_v_i8m2_i8mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2( @@ -1579,7 +1579,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) { - return vlmul_trunc_v_i8m2_i8mf2(op1); + return __riscv_vlmul_trunc_v_i8m2_i8mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1( @@ -1588,7 +1588,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) { - return vlmul_trunc_v_i8m2_i8m1(op1); + return __riscv_vlmul_trunc_v_i8m2_i8m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8( @@ -1597,7 +1597,7 @@ vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) { - return vlmul_trunc_v_i8m4_i8mf8(op1); + return __riscv_vlmul_trunc_v_i8m4_i8mf8(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4( @@ -1606,7 +1606,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) { - return vlmul_trunc_v_i8m4_i8mf4(op1); + return __riscv_vlmul_trunc_v_i8m4_i8mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2( @@ -1615,7 +1615,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) { - return vlmul_trunc_v_i8m4_i8mf2(op1); + return __riscv_vlmul_trunc_v_i8m4_i8mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1( @@ -1624,7 +1624,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) { - return vlmul_trunc_v_i8m4_i8m1(op1); + return __riscv_vlmul_trunc_v_i8m4_i8m1(op1); } // CHECK-RV64-LABEL: 
@test_vlmul_trunc_v_i8m4_i8m2( @@ -1633,7 +1633,7 @@ vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) { - return vlmul_trunc_v_i8m4_i8m2(op1); + return __riscv_vlmul_trunc_v_i8m4_i8m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8( @@ -1642,7 +1642,7 @@ vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) { - return vlmul_trunc_v_i8m8_i8mf8(op1); + return __riscv_vlmul_trunc_v_i8m8_i8mf8(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4( @@ -1651,7 +1651,7 @@ vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) { - return vlmul_trunc_v_i8m8_i8mf4(op1); + return __riscv_vlmul_trunc_v_i8m8_i8mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2( @@ -1660,7 +1660,7 @@ vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) { - return vlmul_trunc_v_i8m8_i8mf2(op1); + return __riscv_vlmul_trunc_v_i8m8_i8mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1( @@ -1669,7 +1669,7 @@ vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) { - return vlmul_trunc_v_i8m8_i8m1(op1); + return __riscv_vlmul_trunc_v_i8m8_i8m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2( @@ -1678,7 +1678,7 @@ vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) { - return vlmul_trunc_v_i8m8_i8m2(op1); + return __riscv_vlmul_trunc_v_i8m8_i8m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4( @@ -1687,7 +1687,7 @@ vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) { - return vlmul_trunc_v_i8m8_i8m4(op1); + return __riscv_vlmul_trunc_v_i8m8_i8m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4( @@ -1696,7 +1696,7 @@ vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) { - return vlmul_trunc_v_i16mf2_i16mf4(op1); + return __riscv_vlmul_trunc_v_i16mf2_i16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4( @@ -1705,7 +1705,7 @@ vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) { - return vlmul_trunc_v_i16m1_i16mf4(op1); + return __riscv_vlmul_trunc_v_i16m1_i16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2( @@ -1714,7 +1714,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) { - return vlmul_trunc_v_i16m1_i16mf2(op1); + return __riscv_vlmul_trunc_v_i16m1_i16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4( @@ -1723,7 +1723,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) { - return vlmul_trunc_v_i16m2_i16mf4(op1); + return __riscv_vlmul_trunc_v_i16m2_i16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2( @@ -1732,7 +1732,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) { - return vlmul_trunc_v_i16m2_i16mf2(op1); + return __riscv_vlmul_trunc_v_i16m2_i16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1( @@ -1741,7 +1741,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) { - return vlmul_trunc_v_i16m2_i16m1(op1); + return __riscv_vlmul_trunc_v_i16m2_i16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4( @@ -1750,7 +1750,7 @@ vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) { - return vlmul_trunc_v_i16m4_i16mf4(op1); + return __riscv_vlmul_trunc_v_i16m4_i16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2( @@ -1759,7 +1759,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) { - return vlmul_trunc_v_i16m4_i16mf2(op1); + return __riscv_vlmul_trunc_v_i16m4_i16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1( @@ -1768,7 +1768,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) { - return vlmul_trunc_v_i16m4_i16m1(op1); + return __riscv_vlmul_trunc_v_i16m4_i16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2( @@ -1777,7 +1777,7 @@ vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) { - return vlmul_trunc_v_i16m4_i16m2(op1); + return __riscv_vlmul_trunc_v_i16m4_i16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4( @@ -1786,7 +1786,7 @@ vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) { - return vlmul_trunc_v_i16m8_i16mf4(op1); + return __riscv_vlmul_trunc_v_i16m8_i16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2( @@ -1795,7 +1795,7 @@ vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) { - return vlmul_trunc_v_i16m8_i16mf2(op1); + return __riscv_vlmul_trunc_v_i16m8_i16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1( @@ -1804,7 +1804,7 @@ vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) { - return vlmul_trunc_v_i16m8_i16m1(op1); + return __riscv_vlmul_trunc_v_i16m8_i16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2( @@ -1813,7 +1813,7 @@ vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) { - return vlmul_trunc_v_i16m8_i16m2(op1); + return __riscv_vlmul_trunc_v_i16m8_i16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4( @@ -1822,7 +1822,7 @@ vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) { - return vlmul_trunc_v_i16m8_i16m4(op1); + return __riscv_vlmul_trunc_v_i16m8_i16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2( @@ -1831,7 +1831,7 @@ vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) { - return vlmul_trunc_v_i32m1_i32mf2(op1); + return __riscv_vlmul_trunc_v_i32m1_i32mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2( @@ -1840,7 +1840,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) { - return vlmul_trunc_v_i32m2_i32mf2(op1); + return __riscv_vlmul_trunc_v_i32m2_i32mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1( @@ -1849,7 +1849,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) { - return vlmul_trunc_v_i32m2_i32m1(op1); + return __riscv_vlmul_trunc_v_i32m2_i32m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2( @@ -1858,7 +1858,7 @@ vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) { - return vlmul_trunc_v_i32m4_i32mf2(op1); + return __riscv_vlmul_trunc_v_i32m4_i32mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1( @@ -1867,7 +1867,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) { - return vlmul_trunc_v_i32m4_i32m1(op1); + return __riscv_vlmul_trunc_v_i32m4_i32m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2( @@ -1876,7 +1876,7 @@ vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) { - return vlmul_trunc_v_i32m4_i32m2(op1); + return __riscv_vlmul_trunc_v_i32m4_i32m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2( @@ -1885,7 +1885,7 @@ vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) { - return vlmul_trunc_v_i32m8_i32mf2(op1); + return __riscv_vlmul_trunc_v_i32m8_i32mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1( @@ -1894,7 +1894,7 @@ vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) { - return vlmul_trunc_v_i32m8_i32m1(op1); + return __riscv_vlmul_trunc_v_i32m8_i32m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2( @@ -1903,7 +1903,7 @@ vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t 
op1) { - return vlmul_trunc_v_i32m8_i32m2(op1); + return __riscv_vlmul_trunc_v_i32m8_i32m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4( @@ -1912,7 +1912,7 @@ vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) { - return vlmul_trunc_v_i32m8_i32m4(op1); + return __riscv_vlmul_trunc_v_i32m8_i32m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1( @@ -1921,7 +1921,7 @@ vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) { - return vlmul_trunc_v_i64m2_i64m1(op1); + return __riscv_vlmul_trunc_v_i64m2_i64m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1( @@ -1930,7 +1930,7 @@ vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) { - return vlmul_trunc_v_i64m4_i64m1(op1); + return __riscv_vlmul_trunc_v_i64m4_i64m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2( @@ -1939,7 +1939,7 @@ vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) { - return vlmul_trunc_v_i64m4_i64m2(op1); + return __riscv_vlmul_trunc_v_i64m4_i64m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1( @@ -1948,7 +1948,7 @@ vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) { - return vlmul_trunc_v_i64m8_i64m1(op1); + return __riscv_vlmul_trunc_v_i64m8_i64m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2( @@ -1957,7 +1957,7 @@ vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) { - return vlmul_trunc_v_i64m8_i64m2(op1); + 
return __riscv_vlmul_trunc_v_i64m8_i64m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4( @@ -1966,7 +1966,7 @@ vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) { - return vlmul_trunc_v_i64m8_i64m4(op1); + return __riscv_vlmul_trunc_v_i64m8_i64m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8( @@ -1975,7 +1975,7 @@ vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) { - return vlmul_trunc_v_u8mf4_u8mf8(op1); + return __riscv_vlmul_trunc_v_u8mf4_u8mf8(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8( @@ -1984,7 +1984,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) { - return vlmul_trunc_v_u8mf2_u8mf8(op1); + return __riscv_vlmul_trunc_v_u8mf2_u8mf8(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4( @@ -1993,7 +1993,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) { - return vlmul_trunc_v_u8mf2_u8mf4(op1); + return __riscv_vlmul_trunc_v_u8mf2_u8mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8( @@ -2002,7 +2002,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) { - return vlmul_trunc_v_u8m1_u8mf8(op1); + return __riscv_vlmul_trunc_v_u8m1_u8mf8(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4( @@ -2011,7 +2011,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) { - return vlmul_trunc_v_u8m1_u8mf4(op1); + return __riscv_vlmul_trunc_v_u8m1_u8mf4(op1); } 
// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2( @@ -2020,7 +2020,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) { - return vlmul_trunc_v_u8m1_u8mf2(op1); + return __riscv_vlmul_trunc_v_u8m1_u8mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8( @@ -2029,7 +2029,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) { - return vlmul_trunc_v_u8m2_u8mf8(op1); + return __riscv_vlmul_trunc_v_u8m2_u8mf8(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4( @@ -2038,7 +2038,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) { - return vlmul_trunc_v_u8m2_u8mf4(op1); + return __riscv_vlmul_trunc_v_u8m2_u8mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2( @@ -2047,7 +2047,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) { - return vlmul_trunc_v_u8m2_u8mf2(op1); + return __riscv_vlmul_trunc_v_u8m2_u8mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1( @@ -2056,7 +2056,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) { - return vlmul_trunc_v_u8m2_u8m1(op1); + return __riscv_vlmul_trunc_v_u8m2_u8m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8( @@ -2065,7 +2065,7 @@ vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) { - return vlmul_trunc_v_u8m4_u8mf8(op1); + return __riscv_vlmul_trunc_v_u8m4_u8mf8(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4( @@ -2074,7 +2074,7 @@ 
vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) { - return vlmul_trunc_v_u8m4_u8mf4(op1); + return __riscv_vlmul_trunc_v_u8m4_u8mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2( @@ -2083,7 +2083,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) { - return vlmul_trunc_v_u8m4_u8mf2(op1); + return __riscv_vlmul_trunc_v_u8m4_u8mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1( @@ -2092,7 +2092,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) { - return vlmul_trunc_v_u8m4_u8m1(op1); + return __riscv_vlmul_trunc_v_u8m4_u8m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2( @@ -2101,7 +2101,7 @@ vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) { - return vlmul_trunc_v_u8m4_u8m2(op1); + return __riscv_vlmul_trunc_v_u8m4_u8m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8( @@ -2110,7 +2110,7 @@ vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) { - return vlmul_trunc_v_u8m8_u8mf8(op1); + return __riscv_vlmul_trunc_v_u8m8_u8mf8(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4( @@ -2119,7 +2119,7 @@ vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) { - return vlmul_trunc_v_u8m8_u8mf4(op1); + return __riscv_vlmul_trunc_v_u8m8_u8mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2( @@ -2128,7 +2128,7 @@ vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) { - return vlmul_trunc_v_u8m8_u8mf2(op1); + return __riscv_vlmul_trunc_v_u8m8_u8mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1( @@ -2137,7 +2137,7 @@ vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) { - return vlmul_trunc_v_u8m8_u8m1(op1); + return __riscv_vlmul_trunc_v_u8m8_u8m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2( @@ -2146,7 +2146,7 @@ vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) { - return vlmul_trunc_v_u8m8_u8m2(op1); + return __riscv_vlmul_trunc_v_u8m8_u8m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4( @@ -2155,7 +2155,7 @@ vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) { - return vlmul_trunc_v_u8m8_u8m4(op1); + return __riscv_vlmul_trunc_v_u8m8_u8m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4( @@ -2164,7 +2164,7 @@ vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) { - return vlmul_trunc_v_u16mf2_u16mf4(op1); + return __riscv_vlmul_trunc_v_u16mf2_u16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4( @@ -2173,7 +2173,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) { - return vlmul_trunc_v_u16m1_u16mf4(op1); + return __riscv_vlmul_trunc_v_u16m1_u16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2( @@ -2182,7 +2182,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) { - return vlmul_trunc_v_u16m1_u16mf2(op1); + return __riscv_vlmul_trunc_v_u16m1_u16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4( @@ -2191,7 +2191,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) { - return vlmul_trunc_v_u16m2_u16mf4(op1); + return __riscv_vlmul_trunc_v_u16m2_u16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2( @@ -2200,7 +2200,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) { - return vlmul_trunc_v_u16m2_u16mf2(op1); + return __riscv_vlmul_trunc_v_u16m2_u16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1( @@ -2209,7 +2209,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) { - return vlmul_trunc_v_u16m2_u16m1(op1); + return __riscv_vlmul_trunc_v_u16m2_u16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4( @@ -2218,7 +2218,7 @@ vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) { - return vlmul_trunc_v_u16m4_u16mf4(op1); + return __riscv_vlmul_trunc_v_u16m4_u16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2( @@ -2227,7 +2227,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) { - return vlmul_trunc_v_u16m4_u16mf2(op1); + return __riscv_vlmul_trunc_v_u16m4_u16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1( @@ -2236,7 +2236,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) { - return vlmul_trunc_v_u16m4_u16m1(op1); + return __riscv_vlmul_trunc_v_u16m4_u16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2( @@ -2245,7 +2245,7 @@ vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) { - return vlmul_trunc_v_u16m4_u16m2(op1); + return __riscv_vlmul_trunc_v_u16m4_u16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4( @@ -2254,7 +2254,7 @@ vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) { - return vlmul_trunc_v_u16m8_u16mf4(op1); + return __riscv_vlmul_trunc_v_u16m8_u16mf4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2( @@ -2263,7 +2263,7 @@ vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) { - return vlmul_trunc_v_u16m8_u16mf2(op1); + return __riscv_vlmul_trunc_v_u16m8_u16mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1( @@ -2272,7 +2272,7 @@ vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) { - return vlmul_trunc_v_u16m8_u16m1(op1); + return __riscv_vlmul_trunc_v_u16m8_u16m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2( @@ -2281,7 +2281,7 @@ vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) { - return vlmul_trunc_v_u16m8_u16m2(op1); + return __riscv_vlmul_trunc_v_u16m8_u16m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4( @@ -2290,7 +2290,7 @@ vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) { - return vlmul_trunc_v_u16m8_u16m4(op1); + return __riscv_vlmul_trunc_v_u16m8_u16m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2( @@ -2299,7 +2299,7 @@ vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { - return vlmul_trunc_v_u32m1_u32mf2(op1); + return __riscv_vlmul_trunc_v_u32m1_u32mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2( @@ -2308,7 +2308,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) { - return vlmul_trunc_v_u32m2_u32mf2(op1); + return __riscv_vlmul_trunc_v_u32m2_u32mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1( @@ -2317,7 +2317,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { - return vlmul_trunc_v_u32m2_u32m1(op1); + return __riscv_vlmul_trunc_v_u32m2_u32m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2( @@ -2326,7 +2326,7 @@ vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { - return vlmul_trunc_v_u32m4_u32mf2(op1); + return __riscv_vlmul_trunc_v_u32m4_u32mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1( @@ -2335,7 +2335,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { - return vlmul_trunc_v_u32m4_u32m1(op1); + return __riscv_vlmul_trunc_v_u32m4_u32m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2( @@ -2344,7 +2344,7 @@ vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { - return vlmul_trunc_v_u32m4_u32m2(op1); + return __riscv_vlmul_trunc_v_u32m4_u32m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2( @@ -2353,7 +2353,7 @@ vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { - return vlmul_trunc_v_u32m8_u32mf2(op1); + return __riscv_vlmul_trunc_v_u32m8_u32mf2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1( @@ -2362,7 +2362,7 @@ vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { - return vlmul_trunc_v_u32m8_u32m1(op1); + return __riscv_vlmul_trunc_v_u32m8_u32m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2( @@ -2371,7 +2371,7 @@ vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { - return vlmul_trunc_v_u32m8_u32m2(op1); + return __riscv_vlmul_trunc_v_u32m8_u32m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4( @@ -2380,7 +2380,7 @@ vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { - return vlmul_trunc_v_u32m8_u32m4(op1); + return __riscv_vlmul_trunc_v_u32m8_u32m4(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1( @@ -2389,7 +2389,7 @@ vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { - return vlmul_trunc_v_u64m2_u64m1(op1); + return __riscv_vlmul_trunc_v_u64m2_u64m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1( @@ -2398,7 +2398,7 @@ vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { - return vlmul_trunc_v_u64m4_u64m1(op1); + return __riscv_vlmul_trunc_v_u64m4_u64m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2( @@ -2407,7 +2407,7 @@ vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { - return vlmul_trunc_v_u64m4_u64m2(op1); + return __riscv_vlmul_trunc_v_u64m4_u64m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1( @@ -2416,7 +2416,7 @@ vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { - return vlmul_trunc_v_u64m8_u64m1(op1); + return __riscv_vlmul_trunc_v_u64m8_u64m1(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2( @@ -2425,7 +2425,7 @@ vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { - return vlmul_trunc_v_u64m8_u64m2(op1); + return __riscv_vlmul_trunc_v_u64m8_u64m2(op1); } // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4( @@ -2434,6 +2434,6 @@ vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { - return vlmul_trunc_v_u64m8_u64m4(op1); + return __riscv_vlmul_trunc_v_u64m8_u64m4(op1); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei16.c index 6b9eae9d6684..d99a4afd756b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei16_v_f16mf4(const _Float16 *base, 
vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f16mf4(base, bindex, vl); + return __riscv_vloxei16_v_f16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f16mf2(base, bindex, vl); + return __riscv_vloxei16_v_f16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f16m1(base, bindex, vl); + return __riscv_vloxei16_v_f16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f16m2(base, bindex, vl); + return __riscv_vloxei16_v_f16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f16m4(base, bindex, vl); + return __riscv_vloxei16_v_f16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vloxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vloxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_f16m8(base, bindex, vl); + return __riscv_vloxei16_v_f16m8(base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vloxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f32mf2(base, bindex, vl); + return __riscv_vloxei16_v_f32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f32m1(base, bindex, vl); + return __riscv_vloxei16_v_f32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f32m2(base, bindex, vl); + return __riscv_vloxei16_v_f32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f32m4(base, bindex, vl); + return __riscv_vloxei16_v_f32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f32m8(base, bindex, vl); + return __riscv_vloxei16_v_f32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f64m1(base, bindex, vl); + return __riscv_vloxei16_v_f64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f64m2(base, bindex, vl); + return __riscv_vloxei16_v_f64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f64m4(base, bindex, vl); + return __riscv_vloxei16_v_f64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f64m8(base, bindex, vl); + return __riscv_vloxei16_v_f64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8( @@ -148,7 +148,7 @@ vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i8mf8(base, bindex, vl); + return __riscv_vloxei16_v_i8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4( @@ -157,7 +157,7 @@ vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return 
vloxei16_v_i8mf4(base, bindex, vl); + return __riscv_vloxei16_v_i8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2( @@ -166,7 +166,7 @@ vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i8mf2(base, bindex, vl); + return __riscv_vloxei16_v_i8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1( @@ -175,7 +175,7 @@ vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i8m1(base, bindex, vl); + return __riscv_vloxei16_v_i8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2( @@ -184,7 +184,7 @@ vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i8m2(base, bindex, vl); + return __riscv_vloxei16_v_i8m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4( @@ -193,7 +193,7 @@ vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i8m4(base, bindex, vl); + return __riscv_vloxei16_v_i8m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4( @@ -202,7 +202,7 @@ vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i16mf4(base, bindex, vl); + return __riscv_vloxei16_v_i16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2( @@ -211,7 +211,7 @@ vint16mf4_t 
test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i16mf2(base, bindex, vl); + return __riscv_vloxei16_v_i16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1( @@ -220,7 +220,7 @@ vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i16m1(base, bindex, vl); + return __riscv_vloxei16_v_i16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2( @@ -229,7 +229,7 @@ vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i16m2(base, bindex, vl); + return __riscv_vloxei16_v_i16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4( @@ -238,7 +238,7 @@ vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i16m4(base, bindex, vl); + return __riscv_vloxei16_v_i16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8( @@ -247,7 +247,7 @@ vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i16m8(base, bindex, vl); + return __riscv_vloxei16_v_i16m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2( @@ -256,7 +256,7 @@ vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei16_v_i32mf2(const 
int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i32mf2(base, bindex, vl); + return __riscv_vloxei16_v_i32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1( @@ -265,7 +265,7 @@ vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i32m1(base, bindex, vl); + return __riscv_vloxei16_v_i32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2( @@ -274,7 +274,7 @@ vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i32m2(base, bindex, vl); + return __riscv_vloxei16_v_i32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4( @@ -283,7 +283,7 @@ vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i32m4(base, bindex, vl); + return __riscv_vloxei16_v_i32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8( @@ -292,7 +292,7 @@ vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i32m8(base, bindex, vl); + return __riscv_vloxei16_v_i32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1( @@ -301,7 +301,7 @@ vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i64m1(base, bindex, vl); + return __riscv_vloxei16_v_i64m1(base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2( @@ -310,7 +310,7 @@ vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i64m2(base, bindex, vl); + return __riscv_vloxei16_v_i64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4( @@ -319,7 +319,7 @@ vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i64m4(base, bindex, vl); + return __riscv_vloxei16_v_i64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8( @@ -328,7 +328,7 @@ vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i64m8(base, bindex, vl); + return __riscv_vloxei16_v_i64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8( @@ -337,7 +337,7 @@ vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u8mf8(base, bindex, vl); + return __riscv_vloxei16_v_u8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4( @@ -346,7 +346,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u8mf4(base, bindex, vl); + return __riscv_vloxei16_v_u8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2( @@ -355,7 +355,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u8mf2(base, bindex, vl); + return __riscv_vloxei16_v_u8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1( @@ -364,7 +364,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u8m1(base, bindex, vl); + return __riscv_vloxei16_v_u8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2( @@ -373,7 +373,7 @@ vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u8m2(base, bindex, vl); + return __riscv_vloxei16_v_u8m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4( @@ -382,7 +382,7 @@ vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u8m4(base, bindex, vl); + return __riscv_vloxei16_v_u8m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4( @@ -391,7 +391,7 @@ vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u16mf4(base, bindex, vl); + return __riscv_vloxei16_v_u16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2( @@ -400,7 +400,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u16mf2(base, 
bindex, vl); + return __riscv_vloxei16_v_u16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1( @@ -409,7 +409,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u16m1(base, bindex, vl); + return __riscv_vloxei16_v_u16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2( @@ -418,7 +418,7 @@ vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u16m2(base, bindex, vl); + return __riscv_vloxei16_v_u16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4( @@ -427,7 +427,7 @@ vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u16m4(base, bindex, vl); + return __riscv_vloxei16_v_u16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8( @@ -436,7 +436,7 @@ vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u16m8(base, bindex, vl); + return __riscv_vloxei16_v_u16m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2( @@ -445,7 +445,7 @@ vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u32mf2(base, bindex, vl); + return __riscv_vloxei16_v_u32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1( @@ -454,7 +454,7 @@ 
vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u32m1(base, bindex, vl); + return __riscv_vloxei16_v_u32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2( @@ -463,7 +463,7 @@ vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u32m2(base, bindex, vl); + return __riscv_vloxei16_v_u32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4( @@ -472,7 +472,7 @@ vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u32m4(base, bindex, vl); + return __riscv_vloxei16_v_u32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8( @@ -481,7 +481,7 @@ vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u32m8(base, bindex, vl); + return __riscv_vloxei16_v_u32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1( @@ -490,7 +490,7 @@ vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u64m1(base, bindex, vl); + return __riscv_vloxei16_v_u64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2( @@ -499,7 +499,7 @@ vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u64m2(base, bindex, vl); + return __riscv_vloxei16_v_u64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4( @@ -508,7 +508,7 @@ vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u64m4(base, bindex, vl); + return __riscv_vloxei16_v_u64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8( @@ -517,7 +517,7 @@ vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u64m8(base, bindex, vl); + return __riscv_vloxei16_v_u64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_m( @@ -526,7 +526,7 @@ vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f16mf4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_m( @@ -535,7 +535,7 @@ vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f16mf2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_m( @@ -544,7 +544,7 @@ vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t mask, const _Float16 
*base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f16m1_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_m( @@ -553,7 +553,7 @@ vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f16m2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_m( @@ -562,7 +562,7 @@ vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f16m4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_m( @@ -571,7 +571,7 @@ vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_f16m8_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m( @@ -580,7 +580,7 @@ vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f32mf2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m( @@ -589,7 +589,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, const float *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vloxei16_v_f32m1_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f32m1_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m( @@ -598,7 +598,7 @@ vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, const float *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f32m2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_m( @@ -607,7 +607,7 @@ vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, const float *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f32m4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m( @@ -616,7 +616,7 @@ vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, const float *base, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, const float *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f32m8_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_m( @@ -625,7 +625,7 @@ vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, const float *base, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f64m1_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_m( @@ -634,7 +634,7 @@ vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, const double *base, vuint16 // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f64m2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_m( @@ -643,7 +643,7 @@ vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, const double *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f64m4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_m( @@ -652,7 +652,7 @@ vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, const double *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, const double *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f64m8_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_m( @@ -661,7 +661,7 @@ vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, const double *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i8mf8_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_m( @@ -670,7 +670,7 @@ vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i8mf4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_m( @@ -679,7 +679,7 @@ vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, 
vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i8mf2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_m( @@ -688,7 +688,7 @@ vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i8m1_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_m( @@ -697,7 +697,7 @@ vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i8m2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_m( @@ -706,7 +706,7 @@ vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i8m4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_m( @@ -715,7 +715,7 @@ vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i16mf4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_m( @@ -724,7 +724,7 @@ vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, const 
int16_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i16mf2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_m( @@ -733,7 +733,7 @@ vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i16m1_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_m( @@ -742,7 +742,7 @@ vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i16m2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_m( @@ -751,7 +751,7 @@ vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i16m4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_m( @@ -760,7 +760,7 @@ vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i16m8_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_m( @@ -769,7 +769,7 @@ vint16m8_t 
test_vloxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i32mf2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_m( @@ -778,7 +778,7 @@ vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i32m1_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_m( @@ -787,7 +787,7 @@ vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i32m2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_m( @@ -796,7 +796,7 @@ vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i32m4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_m( @@ -805,7 +805,7 @@ vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i32m8_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_m( 
@@ -814,7 +814,7 @@ vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i64m1_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_m( @@ -823,7 +823,7 @@ vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i64m2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_m( @@ -832,7 +832,7 @@ vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i64m4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_m( @@ -841,7 +841,7 @@ vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i64m8_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_m( @@ -850,7 +850,7 @@ vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u8mf8_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u8mf8_m(mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_m( @@ -859,7 +859,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u8mf4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_m( @@ -868,7 +868,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u8mf2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_m( @@ -877,7 +877,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u8m1_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_m( @@ -886,7 +886,7 @@ vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u8m2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_m( @@ -895,7 +895,7 @@ vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u8m4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u8m4_m(mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_m( @@ -904,7 +904,7 @@ vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u16mf4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_m( @@ -913,7 +913,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u16mf2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_m( @@ -922,7 +922,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u16m1_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_m( @@ -931,7 +931,7 @@ vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u16m2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_m( @@ -940,7 +940,7 @@ vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u16m4_m(mask, base, bindex, vl); + 
return __riscv_vloxei16_v_u16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_m( @@ -949,7 +949,7 @@ vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u16m8_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_m( @@ -958,7 +958,7 @@ vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u32mf2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_m( @@ -967,7 +967,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u32m1_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_m( @@ -976,7 +976,7 @@ vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u32m2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_m( @@ -985,7 +985,7 @@ vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return 
vloxei16_v_u32m4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_m( @@ -994,7 +994,7 @@ vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u32m8_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m( @@ -1003,7 +1003,7 @@ vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u64m1_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_m( @@ -1012,7 +1012,7 @@ vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u64m2_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_m( @@ -1021,7 +1021,7 @@ vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u64m4_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_m( @@ -1030,6 +1030,6 @@ vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, const uint64_t 
*base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u64m8_m(mask, base, bindex, vl); + return __riscv_vloxei16_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei32.c index 347416d9f83d..894d6e8950d6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f16mf4(base, bindex, vl); + return __riscv_vloxei32_v_f16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f16mf2(base, bindex, vl); + return __riscv_vloxei32_v_f16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f16m1(base, bindex, vl); + return __riscv_vloxei32_v_f16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f16m2(base, bindex, vl); + return __riscv_vloxei32_v_f16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m4( @@ 
-49,7 +49,7 @@ vfloat16m2_t test_vloxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f16m4(base, bindex, vl); + return __riscv_vloxei32_v_f16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2( @@ -58,7 +58,7 @@ vfloat16m4_t test_vloxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f32mf2(base, bindex, vl); + return __riscv_vloxei32_v_f32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1( @@ -67,7 +67,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f32m1(base, bindex, vl); + return __riscv_vloxei32_v_f32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2( @@ -76,7 +76,7 @@ vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f32m2(base, bindex, vl); + return __riscv_vloxei32_v_f32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4( @@ -85,7 +85,7 @@ vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f32m4(base, bindex, vl); + return __riscv_vloxei32_v_f32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8( @@ -94,7 +94,7 @@ vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t 
test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f32m8(base, bindex, vl); + return __riscv_vloxei32_v_f32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1( @@ -103,7 +103,7 @@ vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f64m1(base, bindex, vl); + return __riscv_vloxei32_v_f64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2( @@ -112,7 +112,7 @@ vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f64m2(base, bindex, vl); + return __riscv_vloxei32_v_f64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4( @@ -121,7 +121,7 @@ vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f64m4(base, bindex, vl); + return __riscv_vloxei32_v_f64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8( @@ -130,7 +130,7 @@ vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f64m8(base, bindex, vl); + return __riscv_vloxei32_v_f64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8( @@ -139,7 +139,7 @@ vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i8mf8(base, bindex, vl); + return 
__riscv_vloxei32_v_i8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4( @@ -148,7 +148,7 @@ vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i8mf4(base, bindex, vl); + return __riscv_vloxei32_v_i8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2( @@ -157,7 +157,7 @@ vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i8mf2(base, bindex, vl); + return __riscv_vloxei32_v_i8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1( @@ -166,7 +166,7 @@ vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i8m1(base, bindex, vl); + return __riscv_vloxei32_v_i8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2( @@ -175,7 +175,7 @@ vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i8m2(base, bindex, vl); + return __riscv_vloxei32_v_i8m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4( @@ -184,7 +184,7 @@ vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i16mf4(base, bindex, vl); + return __riscv_vloxei32_v_i16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2( @@ -193,7 +193,7 @@ vint16mf4_t test_vloxei32_v_i16mf4(const int16_t 
*base, vuint32mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i16mf2(base, bindex, vl); + return __riscv_vloxei32_v_i16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1( @@ -202,7 +202,7 @@ vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i16m1(base, bindex, vl); + return __riscv_vloxei32_v_i16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2( @@ -211,7 +211,7 @@ vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i16m2(base, bindex, vl); + return __riscv_vloxei32_v_i16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4( @@ -220,7 +220,7 @@ vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i16m4(base, bindex, vl); + return __riscv_vloxei32_v_i16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2( @@ -229,7 +229,7 @@ vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i32mf2(base, bindex, vl); + return __riscv_vloxei32_v_i32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1( @@ -238,7 +238,7 @@ vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, 
size_t vl) { - return vloxei32_v_i32m1(base, bindex, vl); + return __riscv_vloxei32_v_i32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2( @@ -247,7 +247,7 @@ vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i32m2(base, bindex, vl); + return __riscv_vloxei32_v_i32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4( @@ -256,7 +256,7 @@ vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i32m4(base, bindex, vl); + return __riscv_vloxei32_v_i32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8( @@ -265,7 +265,7 @@ vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i32m8(base, bindex, vl); + return __riscv_vloxei32_v_i32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1( @@ -274,7 +274,7 @@ vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i64m1(base, bindex, vl); + return __riscv_vloxei32_v_i64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2( @@ -283,7 +283,7 @@ vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i64m2(base, bindex, vl); + return __riscv_vloxei32_v_i64m2(base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei32_v_i64m4( @@ -292,7 +292,7 @@ vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i64m4(base, bindex, vl); + return __riscv_vloxei32_v_i64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8( @@ -301,7 +301,7 @@ vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i64m8(base, bindex, vl); + return __riscv_vloxei32_v_i64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8( @@ -310,7 +310,7 @@ vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u8mf8(base, bindex, vl); + return __riscv_vloxei32_v_u8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4( @@ -319,7 +319,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u8mf4(base, bindex, vl); + return __riscv_vloxei32_v_u8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2( @@ -328,7 +328,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u8mf2(base, bindex, vl); + return __riscv_vloxei32_v_u8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1( @@ -337,7 +337,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_ // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u8m1(base, bindex, vl); + return __riscv_vloxei32_v_u8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2( @@ -346,7 +346,7 @@ vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u8m2(base, bindex, vl); + return __riscv_vloxei32_v_u8m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4( @@ -355,7 +355,7 @@ vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u16mf4(base, bindex, vl); + return __riscv_vloxei32_v_u16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2( @@ -364,7 +364,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u16mf2(base, bindex, vl); + return __riscv_vloxei32_v_u16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1( @@ -373,7 +373,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u16m1(base, bindex, vl); + return __riscv_vloxei32_v_u16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2( @@ -382,7 +382,7 @@ vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u16m2(base, bindex, vl); 
+ return __riscv_vloxei32_v_u16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4( @@ -391,7 +391,7 @@ vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u16m4(base, bindex, vl); + return __riscv_vloxei32_v_u16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2( @@ -400,7 +400,7 @@ vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u32mf2(base, bindex, vl); + return __riscv_vloxei32_v_u32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1( @@ -409,7 +409,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u32m1(base, bindex, vl); + return __riscv_vloxei32_v_u32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2( @@ -418,7 +418,7 @@ vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u32m2(base, bindex, vl); + return __riscv_vloxei32_v_u32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4( @@ -427,7 +427,7 @@ vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u32m4(base, bindex, vl); + return __riscv_vloxei32_v_u32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8( @@ -436,7 +436,7 @@ vuint32m4_t 
test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u32m8(base, bindex, vl); + return __riscv_vloxei32_v_u32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1( @@ -445,7 +445,7 @@ vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u64m1(base, bindex, vl); + return __riscv_vloxei32_v_u64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2( @@ -454,7 +454,7 @@ vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u64m2(base, bindex, vl); + return __riscv_vloxei32_v_u64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4( @@ -463,7 +463,7 @@ vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u64m4(base, bindex, vl); + return __riscv_vloxei32_v_u64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8( @@ -472,7 +472,7 @@ vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u64m8(base, bindex, vl); + return __riscv_vloxei32_v_u64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_m( @@ -481,7 +481,7 @@ vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t 
test_vloxei32_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f16mf4_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_m( @@ -490,7 +490,7 @@ vfloat16mf4_t test_vloxei32_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f16mf2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_m( @@ -499,7 +499,7 @@ vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f16m1_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_m( @@ -508,7 +508,7 @@ vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f16m2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_m( @@ -517,7 +517,7 @@ vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f16m4_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m( @@ -526,7 +526,7 @@ vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint3 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f32mf2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_m( @@ -535,7 +535,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, const float *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f32m1_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_m( @@ -544,7 +544,7 @@ vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, const float *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f32m2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_m( @@ -553,7 +553,7 @@ vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, const float *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f32m4_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_m( @@ -562,7 +562,7 @@ vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, const float *base, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, const float *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f32m8_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_m( @@ -571,7 +571,7 @@ vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, 
const float *base, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f64m1_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_m( @@ -580,7 +580,7 @@ vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, const double *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f64m2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_m( @@ -589,7 +589,7 @@ vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, const double *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f64m4_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_m( @@ -598,7 +598,7 @@ vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, const double *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, const double *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f64m8_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_m( @@ -607,7 +607,7 @@ vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, const double *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i8mf8_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_m( @@ -616,7 +616,7 @@ vint8mf8_t 
test_vloxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i8mf4_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_m( @@ -625,7 +625,7 @@ vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i8mf2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_m( @@ -634,7 +634,7 @@ vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i8m1_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_m( @@ -643,7 +643,7 @@ vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i8m2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_m( @@ -652,7 +652,7 @@ vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i16mf4_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_m( @@ -661,7 +661,7 
@@ vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i16mf2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_m( @@ -670,7 +670,7 @@ vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i16m1_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_m( @@ -679,7 +679,7 @@ vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i16m2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_m( @@ -688,7 +688,7 @@ vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i16m4_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_m( @@ -697,7 +697,7 @@ vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i32mf2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei32_v_i32m1_m( @@ -706,7 +706,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i32m1_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_m( @@ -715,7 +715,7 @@ vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i32m2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_m( @@ -724,7 +724,7 @@ vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i32m4_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_m( @@ -733,7 +733,7 @@ vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i32m8_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_m( @@ -742,7 +742,7 @@ vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i64m1_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i64m1_m(mask, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_m( @@ -751,7 +751,7 @@ vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i64m2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_m( @@ -760,7 +760,7 @@ vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i64m4_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_m( @@ -769,7 +769,7 @@ vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i64m8_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_m( @@ -778,7 +778,7 @@ vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u8mf8_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_m( @@ -787,7 +787,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u8mf4_m(mask, base, bindex, vl); + return 
__riscv_vloxei32_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_m( @@ -796,7 +796,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u8mf2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_m( @@ -805,7 +805,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u8m1_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_m( @@ -814,7 +814,7 @@ vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u8m2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_m( @@ -823,7 +823,7 @@ vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u16mf4_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_m( @@ -832,7 +832,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return 
vloxei32_v_u16mf2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_m( @@ -841,7 +841,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u16m1_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_m( @@ -850,7 +850,7 @@ vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u16m2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_m( @@ -859,7 +859,7 @@ vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u16m4_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_m( @@ -868,7 +868,7 @@ vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u32mf2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_m( @@ -877,7 +877,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, 
vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u32m1_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_m( @@ -886,7 +886,7 @@ vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u32m2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_m( @@ -895,7 +895,7 @@ vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u32m4_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_m( @@ -904,7 +904,7 @@ vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u32m8_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_m( @@ -913,7 +913,7 @@ vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u64m1_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_m( @@ -922,7 +922,7 @@ vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vloxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u64m2_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_m( @@ -931,7 +931,7 @@ vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u64m4_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_m( @@ -940,6 +940,6 @@ vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u64m8_m(mask, base, bindex, vl); + return __riscv_vloxei32_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei64.c index 67a65b94ed8a..6cda99dd54b3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f16mf4(base, bindex, vl); + return __riscv_vloxei64_v_f16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return 
vloxei64_v_f16mf2(base, bindex, vl); + return __riscv_vloxei64_v_f16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f16m1(base, bindex, vl); + return __riscv_vloxei64_v_f16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f16m2(base, bindex, vl); + return __riscv_vloxei64_v_f16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2( @@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f32mf2(base, bindex, vl); + return __riscv_vloxei64_v_f32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1( @@ -58,7 +58,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f32m1(base, bindex, vl); + return __riscv_vloxei64_v_f32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2( @@ -67,7 +67,7 @@ vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f32m2(base, bindex, vl); + return __riscv_vloxei64_v_f32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4( @@ -76,7 +76,7 
@@ vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f32m4(base, bindex, vl); + return __riscv_vloxei64_v_f32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1( @@ -85,7 +85,7 @@ vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f64m1(base, bindex, vl); + return __riscv_vloxei64_v_f64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2( @@ -94,7 +94,7 @@ vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f64m2(base, bindex, vl); + return __riscv_vloxei64_v_f64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4( @@ -103,7 +103,7 @@ vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f64m4(base, bindex, vl); + return __riscv_vloxei64_v_f64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8( @@ -112,7 +112,7 @@ vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f64m8(base, bindex, vl); + return __riscv_vloxei64_v_f64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8( @@ -121,7 +121,7 @@ vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i8mf8(base, bindex, vl); + return __riscv_vloxei64_v_i8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4( @@ -130,7 +130,7 @@ vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i8mf4(base, bindex, vl); + return __riscv_vloxei64_v_i8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2( @@ -139,7 +139,7 @@ vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i8mf2(base, bindex, vl); + return __riscv_vloxei64_v_i8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1( @@ -148,7 +148,7 @@ vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i8m1(base, bindex, vl); + return __riscv_vloxei64_v_i8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4( @@ -157,7 +157,7 @@ vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i16mf4(base, bindex, vl); + return __riscv_vloxei64_v_i16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2( @@ -166,7 +166,7 @@ vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i16mf2(base, bindex, vl); + return 
__riscv_vloxei64_v_i16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1( @@ -175,7 +175,7 @@ vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i16m1(base, bindex, vl); + return __riscv_vloxei64_v_i16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2( @@ -184,7 +184,7 @@ vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i16m2(base, bindex, vl); + return __riscv_vloxei64_v_i16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2( @@ -193,7 +193,7 @@ vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i32mf2(base, bindex, vl); + return __riscv_vloxei64_v_i32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1( @@ -202,7 +202,7 @@ vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i32m1(base, bindex, vl); + return __riscv_vloxei64_v_i32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2( @@ -211,7 +211,7 @@ vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i32m2(base, bindex, vl); + return __riscv_vloxei64_v_i32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4( @@ -220,7 +220,7 @@ vint32m2_t 
test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i32m4(base, bindex, vl); + return __riscv_vloxei64_v_i32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1( @@ -229,7 +229,7 @@ vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i64m1(base, bindex, vl); + return __riscv_vloxei64_v_i64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2( @@ -238,7 +238,7 @@ vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i64m2(base, bindex, vl); + return __riscv_vloxei64_v_i64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4( @@ -247,7 +247,7 @@ vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i64m4(base, bindex, vl); + return __riscv_vloxei64_v_i64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8( @@ -256,7 +256,7 @@ vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i64m8(base, bindex, vl); + return __riscv_vloxei64_v_i64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8( @@ -265,7 +265,7 @@ vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t 
*base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u8mf8(base, bindex, vl); + return __riscv_vloxei64_v_u8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4( @@ -274,7 +274,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u8mf4(base, bindex, vl); + return __riscv_vloxei64_v_u8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2( @@ -283,7 +283,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u8mf2(base, bindex, vl); + return __riscv_vloxei64_v_u8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1( @@ -292,7 +292,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u8m1(base, bindex, vl); + return __riscv_vloxei64_v_u8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4( @@ -301,7 +301,7 @@ vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u16mf4(base, bindex, vl); + return __riscv_vloxei64_v_u16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2( @@ -310,7 +310,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u16mf2(base, bindex, vl); + return __riscv_vloxei64_v_u16mf2(base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei64_v_u16m1( @@ -319,7 +319,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u16m1(base, bindex, vl); + return __riscv_vloxei64_v_u16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2( @@ -328,7 +328,7 @@ vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u16m2(base, bindex, vl); + return __riscv_vloxei64_v_u16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2( @@ -337,7 +337,7 @@ vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u32mf2(base, bindex, vl); + return __riscv_vloxei64_v_u32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1( @@ -346,7 +346,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u32m1(base, bindex, vl); + return __riscv_vloxei64_v_u32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2( @@ -355,7 +355,7 @@ vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u32m2(base, bindex, vl); + return __riscv_vloxei64_v_u32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4( @@ -364,7 +364,7 @@ vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, 
size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u32m4(base, bindex, vl); + return __riscv_vloxei64_v_u32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1( @@ -373,7 +373,7 @@ vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u64m1(base, bindex, vl); + return __riscv_vloxei64_v_u64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2( @@ -382,7 +382,7 @@ vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u64m2(base, bindex, vl); + return __riscv_vloxei64_v_u64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4( @@ -391,7 +391,7 @@ vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u64m4(base, bindex, vl); + return __riscv_vloxei64_v_u64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8( @@ -400,7 +400,7 @@ vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u64m8(base, bindex, vl); + return __riscv_vloxei64_v_u64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_m( @@ -409,7 +409,7 @@ vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, 
size_t vl) { - return vloxei64_v_f16mf4_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_m( @@ -418,7 +418,7 @@ vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f16mf2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_m( @@ -427,7 +427,7 @@ vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f16m1_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_m( @@ -436,7 +436,7 @@ vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei64_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f16m2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_m( @@ -445,7 +445,7 @@ vfloat16m2_t test_vloxei64_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f32mf2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_m( @@ -454,7 +454,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, const float *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t 
mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f32m1_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_m( @@ -463,7 +463,7 @@ vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, const float *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f32m2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_m( @@ -472,7 +472,7 @@ vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, const float *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f32m4_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, const float *base, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f64m1_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, const double *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f64m2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_f64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_m( @@ -499,7 +499,7 @@ vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, const double *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t 
test_vloxei64_v_f64m4_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f64m4_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_m( @@ -508,7 +508,7 @@ vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, const double *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, const double *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f64m8_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_m( @@ -517,7 +517,7 @@ vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, const double *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i8mf8_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_m( @@ -526,7 +526,7 @@ vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i8mf4_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_m( @@ -535,7 +535,7 @@ vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i8mf2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_m( @@ -544,7 +544,7 @@ vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i8m1_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_m( @@ -553,7 +553,7 @@ vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i16mf4_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_m( @@ -562,7 +562,7 @@ vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i16mf2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_m( @@ -571,7 +571,7 @@ vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i16m1_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_m( @@ -580,7 +580,7 @@ vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i16m2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_m( @@ -589,7 +589,7 @@ vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, 
vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i32mf2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_m( @@ -598,7 +598,7 @@ vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i32m1_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_m( @@ -607,7 +607,7 @@ vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i32m2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_m( @@ -616,7 +616,7 @@ vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i32m4_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_m( @@ -625,7 +625,7 @@ vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i64m1_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_m( @@ -634,7 +634,7 @@ vint64m1_t 
test_vloxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i64m2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_m( @@ -643,7 +643,7 @@ vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i64m4_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_m( @@ -652,7 +652,7 @@ vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i64m8_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_m( @@ -661,7 +661,7 @@ vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u8mf8_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_m( @@ -670,7 +670,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u8mf4_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_m( @@ 
-679,7 +679,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u8mf2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_m( @@ -688,7 +688,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u8m1_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_m( @@ -697,7 +697,7 @@ vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u16mf4_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_m( @@ -706,7 +706,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u16mf2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_m( @@ -715,7 +715,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u16m1_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u16m1_m(mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_m( @@ -724,7 +724,7 @@ vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u16m2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_m( @@ -733,7 +733,7 @@ vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u32mf2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_m( @@ -742,7 +742,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u32m1_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_m( @@ -751,7 +751,7 @@ vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u32m2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_m( @@ -760,7 +760,7 @@ vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u32m4_m(mask, base, bindex, vl); + return 
__riscv_vloxei64_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_m( @@ -769,7 +769,7 @@ vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u64m1_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_m( @@ -778,7 +778,7 @@ vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u64m2_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_m( @@ -787,7 +787,7 @@ vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u64m4_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_m( @@ -796,6 +796,6 @@ vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u64m8_m(mask, base, bindex, vl); + return __riscv_vloxei64_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei8.c index ade0c80e8be4..3da030733a47 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f16mf4(base, bindex, vl); + return __riscv_vloxei8_v_f16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f16mf2(base, bindex, vl); + return __riscv_vloxei8_v_f16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f16m1(base, bindex, vl); + return __riscv_vloxei8_v_f16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f16m2(base, bindex, vl); + return __riscv_vloxei8_v_f16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f16m4(base, bindex, vl); + return __riscv_vloxei8_v_f16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t 
test_vloxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vloxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_f16m8(base, bindex, vl); + return __riscv_vloxei8_v_f16m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vloxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f32mf2(base, bindex, vl); + return __riscv_vloxei8_v_f32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f32m1(base, bindex, vl); + return __riscv_vloxei8_v_f32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f32m2(base, bindex, vl); + return __riscv_vloxei8_v_f32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f32m4(base, bindex, vl); + return __riscv_vloxei8_v_f32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, 
size_t vl) { - return vloxei8_v_f32m8(base, bindex, vl); + return __riscv_vloxei8_v_f32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f64m1(base, bindex, vl); + return __riscv_vloxei8_v_f64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f64m2(base, bindex, vl); + return __riscv_vloxei8_v_f64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f64m4(base, bindex, vl); + return __riscv_vloxei8_v_f64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f64m8(base, bindex, vl); + return __riscv_vloxei8_v_f64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8( @@ -148,7 +148,7 @@ vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i8mf8(base, bindex, vl); + return __riscv_vloxei8_v_i8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4( @@ -157,7 +157,7 
@@ vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i8mf4(base, bindex, vl); + return __riscv_vloxei8_v_i8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2( @@ -166,7 +166,7 @@ vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i8mf2(base, bindex, vl); + return __riscv_vloxei8_v_i8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1( @@ -175,7 +175,7 @@ vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i8m1(base, bindex, vl); + return __riscv_vloxei8_v_i8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2( @@ -184,7 +184,7 @@ vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i8m2(base, bindex, vl); + return __riscv_vloxei8_v_i8m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4( @@ -193,7 +193,7 @@ vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_i8m4(base, bindex, vl); + return __riscv_vloxei8_v_i8m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8( @@ -202,7 +202,7 @@ vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { 
- return vloxei8_v_i8m8(base, bindex, vl); + return __riscv_vloxei8_v_i8m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4( @@ -211,7 +211,7 @@ vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i16mf4(base, bindex, vl); + return __riscv_vloxei8_v_i16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2( @@ -220,7 +220,7 @@ vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i16mf2(base, bindex, vl); + return __riscv_vloxei8_v_i16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1( @@ -229,7 +229,7 @@ vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i16m1(base, bindex, vl); + return __riscv_vloxei8_v_i16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2( @@ -238,7 +238,7 @@ vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i16m2(base, bindex, vl); + return __riscv_vloxei8_v_i16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4( @@ -247,7 +247,7 @@ vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i16m4(base, bindex, vl); + return __riscv_vloxei8_v_i16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8( @@ -256,7 +256,7 @@ 
vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_i16m8(base, bindex, vl); + return __riscv_vloxei8_v_i16m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2( @@ -265,7 +265,7 @@ vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i32mf2(base, bindex, vl); + return __riscv_vloxei8_v_i32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1( @@ -274,7 +274,7 @@ vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i32m1(base, bindex, vl); + return __riscv_vloxei8_v_i32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2( @@ -283,7 +283,7 @@ vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i32m2(base, bindex, vl); + return __riscv_vloxei8_v_i32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4( @@ -292,7 +292,7 @@ vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i32m4(base, bindex, vl); + return __riscv_vloxei8_v_i32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8( @@ -301,7 +301,7 @@ vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, 
vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i32m8(base, bindex, vl); + return __riscv_vloxei8_v_i32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1( @@ -310,7 +310,7 @@ vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i64m1(base, bindex, vl); + return __riscv_vloxei8_v_i64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2( @@ -319,7 +319,7 @@ vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i64m2(base, bindex, vl); + return __riscv_vloxei8_v_i64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4( @@ -328,7 +328,7 @@ vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i64m4(base, bindex, vl); + return __riscv_vloxei8_v_i64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8( @@ -337,7 +337,7 @@ vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i64m8(base, bindex, vl); + return __riscv_vloxei8_v_i64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8( @@ -346,7 +346,7 @@ vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u8mf8(base, bindex, vl); + return __riscv_vloxei8_v_u8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4( @@ 
-355,7 +355,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u8mf4(base, bindex, vl); + return __riscv_vloxei8_v_u8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2( @@ -364,7 +364,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u8mf2(base, bindex, vl); + return __riscv_vloxei8_v_u8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1( @@ -373,7 +373,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u8m1(base, bindex, vl); + return __riscv_vloxei8_v_u8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2( @@ -382,7 +382,7 @@ vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u8m2(base, bindex, vl); + return __riscv_vloxei8_v_u8m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4( @@ -391,7 +391,7 @@ vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u8m4(base, bindex, vl); + return __riscv_vloxei8_v_u8m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8( @@ -400,7 +400,7 @@ vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, 
vuint8m8_t bindex, size_t vl) { - return vloxei8_v_u8m8(base, bindex, vl); + return __riscv_vloxei8_v_u8m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4( @@ -409,7 +409,7 @@ vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u16mf4(base, bindex, vl); + return __riscv_vloxei8_v_u16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2( @@ -418,7 +418,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u16mf2(base, bindex, vl); + return __riscv_vloxei8_v_u16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1( @@ -427,7 +427,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u16m1(base, bindex, vl); + return __riscv_vloxei8_v_u16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2( @@ -436,7 +436,7 @@ vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u16m2(base, bindex, vl); + return __riscv_vloxei8_v_u16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4( @@ -445,7 +445,7 @@ vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u16m4(base, bindex, vl); + return __riscv_vloxei8_v_u16m4(base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei8_v_u16m8( @@ -454,7 +454,7 @@ vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u16m8(base, bindex, vl); + return __riscv_vloxei8_v_u16m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2( @@ -463,7 +463,7 @@ vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u32mf2(base, bindex, vl); + return __riscv_vloxei8_v_u32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1( @@ -472,7 +472,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u32m1(base, bindex, vl); + return __riscv_vloxei8_v_u32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2( @@ -481,7 +481,7 @@ vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u32m2(base, bindex, vl); + return __riscv_vloxei8_v_u32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4( @@ -490,7 +490,7 @@ vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u32m4(base, bindex, vl); + return __riscv_vloxei8_v_u32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8( @@ -499,7 +499,7 @@ vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u32m8(base, bindex, vl); + return __riscv_vloxei8_v_u32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1( @@ -508,7 +508,7 @@ vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u64m1(base, bindex, vl); + return __riscv_vloxei8_v_u64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2( @@ -517,7 +517,7 @@ vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u64m2(base, bindex, vl); + return __riscv_vloxei8_v_u64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4( @@ -526,7 +526,7 @@ vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u64m4(base, bindex, vl); + return __riscv_vloxei8_v_u64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8( @@ -535,7 +535,7 @@ vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u64m8(base, bindex, vl); + return __riscv_vloxei8_v_u64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_m( @@ -544,7 +544,7 @@ vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f16mf4_m(mask, base, bindex, vl); + 
return __riscv_vloxei8_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_m( @@ -553,7 +553,7 @@ vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f16mf2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_m( @@ -562,7 +562,7 @@ vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f16m1_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_m( @@ -571,7 +571,7 @@ vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f16m2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_m( @@ -580,7 +580,7 @@ vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f16m4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_f16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_m( @@ -589,7 +589,7 @@ vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_f16m8_m(mask, 
base, bindex, vl); + return __riscv_vloxei8_v_f16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m( @@ -598,7 +598,7 @@ vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f32mf2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m( @@ -607,7 +607,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, const float *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f32m1_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m( @@ -616,7 +616,7 @@ vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, const float *base, vuint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f32m2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m( @@ -625,7 +625,7 @@ vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, const float *base, vuint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f32m4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m( @@ -634,7 +634,7 @@ vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, const float *base, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, const float *base, vuint8m2_t bindex, size_t vl) { - return 
vloxei8_v_f32m8_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_f32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_m( @@ -643,7 +643,7 @@ vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, const float *base, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f64m1_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_m( @@ -652,7 +652,7 @@ vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, const double *base, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f64m2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_f64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_m( @@ -661,7 +661,7 @@ vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, const double *base, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f64m4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_m( @@ -670,7 +670,7 @@ vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, const double *base, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, const double *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f64m8_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_m( @@ -679,7 +679,7 @@ vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, const double *base, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - 
return vloxei8_v_i8mf8_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_m( @@ -688,7 +688,7 @@ vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i8mf4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_m( @@ -697,7 +697,7 @@ vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i8mf2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_m( @@ -706,7 +706,7 @@ vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i8m1_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_m( @@ -715,7 +715,7 @@ vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i8m2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_m( @@ -724,7 +724,7 @@ vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return 
vloxei8_v_i8m4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_m( @@ -733,7 +733,7 @@ vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8_v_i8m8_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i8m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_m( @@ -742,7 +742,7 @@ vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i16mf4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_m( @@ -751,7 +751,7 @@ vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i16mf2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_m( @@ -760,7 +760,7 @@ vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i16m1_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_m( @@ -769,7 +769,7 @@ vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - 
return vloxei8_v_i16m2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_m( @@ -778,7 +778,7 @@ vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i16m4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_m( @@ -787,7 +787,7 @@ vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_i16m8_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_m( @@ -796,7 +796,7 @@ vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i32mf2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_m( @@ -805,7 +805,7 @@ vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i32m1_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_m( @@ -814,7 +814,7 @@ vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t 
vl) { - return vloxei8_v_i32m2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_m( @@ -823,7 +823,7 @@ vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i32m4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_m( @@ -832,7 +832,7 @@ vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i32m8_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_m( @@ -841,7 +841,7 @@ vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i64m1_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_m( @@ -850,7 +850,7 @@ vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i64m2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_m( @@ -859,7 +859,7 @@ vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, 
size_t vl) { - return vloxei8_v_i64m4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_m( @@ -868,7 +868,7 @@ vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i64m8_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_m( @@ -877,7 +877,7 @@ vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u8mf8_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_m( @@ -886,7 +886,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u8mf4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_m( @@ -895,7 +895,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u8mf2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_m( @@ -904,7 +904,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t 
bindex, size_t vl) { - return vloxei8_v_u8m1_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_m( @@ -913,7 +913,7 @@ vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u8m2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_m( @@ -922,7 +922,7 @@ vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u8m4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_m( @@ -931,7 +931,7 @@ vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8_v_u8m8_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u8m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_m( @@ -940,7 +940,7 @@ vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u16mf4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_m( @@ -949,7 +949,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t 
bindex, size_t vl) { - return vloxei8_v_u16mf2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_m( @@ -958,7 +958,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u16m1_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_m( @@ -967,7 +967,7 @@ vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u16m2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_m( @@ -976,7 +976,7 @@ vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u16m4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_m( @@ -985,7 +985,7 @@ vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u16m8_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_m( @@ -994,7 +994,7 @@ vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, const uint32_t 
*base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u32mf2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_m( @@ -1003,7 +1003,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u32m1_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_m( @@ -1012,7 +1012,7 @@ vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u32m2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_m( @@ -1021,7 +1021,7 @@ vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u32m4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_m( @@ -1030,7 +1030,7 @@ vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u32m8_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_m( @@ -1039,7 +1039,7 @@ vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vloxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u64m1_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_m( @@ -1048,7 +1048,7 @@ vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u64m2_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_m( @@ -1057,7 +1057,7 @@ vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u64m4_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_m( @@ -1066,6 +1066,6 @@ vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u64m8_m(mask, base, bindex, vl); + return __riscv_vloxei8_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c index f62e700f7775..7f76399bd894 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei16.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const 
_Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2( @@ -30,7 +30,7 @@ void test_vloxseg2ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1( @@ -43,7 +43,7 @@ void test_vloxseg2ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2( @@ -56,7 +56,7 @@ void test_vloxseg2ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4( @@ -69,7 +69,7 @@ void test_vloxseg2ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2( @@ -82,7 +82,7 @@ void test_vloxseg2ei16_v_f16m4(vfloat16m4_t 
*v0, vfloat16m4_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f32mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1( @@ -95,7 +95,7 @@ void test_vloxseg2ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const floa // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2( @@ -108,7 +108,7 @@ void test_vloxseg2ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4( @@ -121,7 +121,7 @@ void test_vloxseg2ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1( @@ -134,7 +134,7 @@ void test_vloxseg2ei16_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m1(v0, v1, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2( @@ -147,7 +147,7 @@ void test_vloxseg2ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4( @@ -160,7 +160,7 @@ void test_vloxseg2ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8( @@ -173,7 +173,7 @@ void test_vloxseg2ei16_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf8(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4( @@ -186,7 +186,7 @@ void test_vloxseg2ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2( @@ -199,7 +199,7 @@ void test_vloxseg2ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t 
bindex, size_t vl) { - return vloxseg2ei16_v_i8mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1( @@ -212,7 +212,7 @@ void test_vloxseg2ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2( @@ -225,7 +225,7 @@ void test_vloxseg2ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4( @@ -238,7 +238,7 @@ void test_vloxseg2ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4( @@ -251,7 +251,7 @@ void test_vloxseg2ei16_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2( @@ -264,7 +264,7 @@ void test_vloxseg2ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t // CHECK-RV64-NEXT: ret void 
// void test_vloxseg2ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1( @@ -277,7 +277,7 @@ void test_vloxseg2ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2( @@ -290,7 +290,7 @@ void test_vloxseg2ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4( @@ -303,7 +303,7 @@ void test_vloxseg2ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2( @@ -316,7 +316,7 @@ void test_vloxseg2ei16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i32mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1( @@ 
-329,7 +329,7 @@ void test_vloxseg2ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2( @@ -342,7 +342,7 @@ void test_vloxseg2ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4( @@ -355,7 +355,7 @@ void test_vloxseg2ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1( @@ -368,7 +368,7 @@ void test_vloxseg2ei16_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2( @@ -381,7 +381,7 @@ void test_vloxseg2ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m2(v0, v1, base, bindex, vl); + 
return __riscv_vloxseg2ei16_v_i64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4( @@ -394,7 +394,7 @@ void test_vloxseg2ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8( @@ -407,7 +407,7 @@ void test_vloxseg2ei16_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf8(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4( @@ -420,7 +420,7 @@ void test_vloxseg2ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2( @@ -433,7 +433,7 @@ void test_vloxseg2ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1( @@ -446,7 +446,7 @@ void test_vloxseg2ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m1(vuint8m1_t *v0, 
vuint8m1_t *v1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2( @@ -459,7 +459,7 @@ void test_vloxseg2ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4( @@ -472,7 +472,7 @@ void test_vloxseg2ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4( @@ -485,7 +485,7 @@ void test_vloxseg2ei16_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2( @@ -498,7 +498,7 @@ void test_vloxseg2ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1( @@ -511,7 +511,7 @@ void 
test_vloxseg2ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2( @@ -524,7 +524,7 @@ void test_vloxseg2ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4( @@ -537,7 +537,7 @@ void test_vloxseg2ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2( @@ -550,7 +550,7 @@ void test_vloxseg2ei16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u32mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1( @@ -563,7 +563,7 @@ void test_vloxseg2ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m1(v0, v1, base, bindex, vl); + 
return __riscv_vloxseg2ei16_v_u32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2( @@ -576,7 +576,7 @@ void test_vloxseg2ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4( @@ -589,7 +589,7 @@ void test_vloxseg2ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1( @@ -602,7 +602,7 @@ void test_vloxseg2ei16_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2( @@ -615,7 +615,7 @@ void test_vloxseg2ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4( @@ -628,7 +628,7 @@ void test_vloxseg2ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m4(vuint64m4_t *v0, 
vuint64m4_t *v1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_m( @@ -641,7 +641,7 @@ void test_vloxseg2ei16_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_m( @@ -654,7 +654,7 @@ void test_vloxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_m( @@ -667,7 +667,7 @@ void test_vloxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_m( @@ -680,7 +680,7 @@ void test_vloxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m2_m(v0, v1, mask, base, bindex, vl); + return 
__riscv_vloxseg2ei16_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_m( @@ -693,7 +693,7 @@ void test_vloxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_m( @@ -706,7 +706,7 @@ void test_vloxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_m( @@ -719,7 +719,7 @@ void test_vloxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_m( @@ -732,7 +732,7 @@ void test_vloxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_m( @@ -745,7 +745,7 @@ void 
test_vloxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_m( @@ -758,7 +758,7 @@ void test_vloxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_m( @@ -771,7 +771,7 @@ void test_vloxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_m( @@ -784,7 +784,7 @@ void test_vloxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_m( @@ -797,7 +797,7 @@ void test_vloxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf8_m(vint8mf8_t 
*v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_m( @@ -810,7 +810,7 @@ void test_vloxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_m( @@ -823,7 +823,7 @@ void test_vloxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_m( @@ -836,7 +836,7 @@ void test_vloxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_m( @@ -849,7 +849,7 @@ void test_vloxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, con // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m2_m(v0, v1, mask, base, bindex, vl); + return 
__riscv_vloxseg2ei16_v_i8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_m( @@ -862,7 +862,7 @@ void test_vloxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, con // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_m( @@ -875,7 +875,7 @@ void test_vloxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, con // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_m( @@ -888,7 +888,7 @@ void test_vloxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_m( @@ -901,7 +901,7 @@ void test_vloxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_m( @@ -914,7 +914,7 @@ void 
test_vloxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_m( @@ -927,7 +927,7 @@ void test_vloxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_m( @@ -940,7 +940,7 @@ void test_vloxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_m( @@ -953,7 +953,7 @@ void test_vloxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_m( @@ -966,7 +966,7 @@ void test_vloxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m2_m(vint32m2_t *v0, 
vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_m( @@ -979,7 +979,7 @@ void test_vloxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_m( @@ -992,7 +992,7 @@ void test_vloxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_m( @@ -1005,7 +1005,7 @@ void test_vloxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_m( @@ -1018,7 +1018,7 @@ void test_vloxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m4_m(v0, v1, mask, base, bindex, vl); + return 
__riscv_vloxseg2ei16_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_m( @@ -1031,7 +1031,7 @@ void test_vloxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_m( @@ -1044,7 +1044,7 @@ void test_vloxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_m( @@ -1057,7 +1057,7 @@ void test_vloxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_m( @@ -1070,7 +1070,7 @@ void test_vloxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_m( @@ -1083,7 +1083,7 @@ void 
test_vloxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, c // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_m( @@ -1096,7 +1096,7 @@ void test_vloxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, c // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_m( @@ -1109,7 +1109,7 @@ void test_vloxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, c // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_m( @@ -1122,7 +1122,7 @@ void test_vloxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_m( @@ -1135,7 +1135,7 @@ void test_vloxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_m( @@ -1148,7 +1148,7 @@ void test_vloxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_m( @@ -1161,7 +1161,7 @@ void test_vloxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_m( @@ -1174,7 +1174,7 @@ void test_vloxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_m( @@ -1187,7 +1187,7 @@ void test_vloxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - 
return vloxseg2ei16_v_u32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_m( @@ -1200,7 +1200,7 @@ void test_vloxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_m( @@ -1213,7 +1213,7 @@ void test_vloxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_m( @@ -1226,7 +1226,7 @@ void test_vloxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_m( @@ -1239,7 +1239,7 @@ void test_vloxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_m( @@ -1252,6 +1252,6 @@ void test_vloxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c index 0403024af5a9..b116ce45dafd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei32.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2( @@ -30,7 +30,7 @@ void test_vloxseg2ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1( @@ -43,7 +43,7 @@ void test_vloxseg2ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m1(v0, v1, base, bindex, 
vl); + return __riscv_vloxseg2ei32_v_f16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2( @@ -56,7 +56,7 @@ void test_vloxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4( @@ -69,7 +69,7 @@ void test_vloxseg2ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2( @@ -82,7 +82,7 @@ void test_vloxseg2ei32_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1( @@ -95,7 +95,7 @@ void test_vloxseg2ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const floa // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2( @@ -108,7 +108,7 @@ void test_vloxseg2ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float * // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4( @@ -121,7 +121,7 @@ void test_vloxseg2ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1( @@ -134,7 +134,7 @@ void test_vloxseg2ei32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2( @@ -147,7 +147,7 @@ void test_vloxseg2ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4( @@ -160,7 +160,7 @@ void test_vloxseg2ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8( @@ -173,7 
+173,7 @@ void test_vloxseg2ei32_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf8(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4( @@ -186,7 +186,7 @@ void test_vloxseg2ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2( @@ -199,7 +199,7 @@ void test_vloxseg2ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1( @@ -212,7 +212,7 @@ void test_vloxseg2ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2( @@ -225,7 +225,7 @@ void test_vloxseg2ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m2(v0, v1, base, bindex, vl); + return 
__riscv_vloxseg2ei32_v_i8m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4( @@ -238,7 +238,7 @@ void test_vloxseg2ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2( @@ -251,7 +251,7 @@ void test_vloxseg2ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1( @@ -264,7 +264,7 @@ void test_vloxseg2ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2( @@ -277,7 +277,7 @@ void test_vloxseg2ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4( @@ -290,7 +290,7 @@ void test_vloxseg2ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m4(vint16m4_t *v0, vint16m4_t 
*v1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2( @@ -303,7 +303,7 @@ void test_vloxseg2ei32_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1( @@ -316,7 +316,7 @@ void test_vloxseg2ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2( @@ -329,7 +329,7 @@ void test_vloxseg2ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4( @@ -342,7 +342,7 @@ void test_vloxseg2ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1( @@ -355,7 +355,7 @@ void test_vloxseg2ei32_v_i32m4(vint32m4_t *v0, 
vint32m4_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2( @@ -368,7 +368,7 @@ void test_vloxseg2ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4( @@ -381,7 +381,7 @@ void test_vloxseg2ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8( @@ -394,7 +394,7 @@ void test_vloxseg2ei32_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf8(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4( @@ -407,7 +407,7 @@ void test_vloxseg2ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf4(v0, v1, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2( @@ -420,7 +420,7 @@ void test_vloxseg2ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1( @@ -433,7 +433,7 @@ void test_vloxseg2ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2( @@ -446,7 +446,7 @@ void test_vloxseg2ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4( @@ -459,7 +459,7 @@ void test_vloxseg2ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2( @@ -472,7 +472,7 @@ void test_vloxseg2ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, 
size_t vl) { - return vloxseg2ei32_v_u16mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1( @@ -485,7 +485,7 @@ void test_vloxseg2ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2( @@ -498,7 +498,7 @@ void test_vloxseg2ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4( @@ -511,7 +511,7 @@ void test_vloxseg2ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2( @@ -524,7 +524,7 @@ void test_vloxseg2ei32_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1( @@ -537,7 +537,7 @@ void test_vloxseg2ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2( @@ -550,7 +550,7 @@ void test_vloxseg2ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4( @@ -563,7 +563,7 @@ void test_vloxseg2ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1( @@ -576,7 +576,7 @@ void test_vloxseg2ei32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2( @@ -589,7 +589,7 @@ void test_vloxseg2ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei32_v_u64m4( @@ -602,7 +602,7 @@ void test_vloxseg2ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_m( @@ -615,7 +615,7 @@ void test_vloxseg2ei32_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_m( @@ -628,7 +628,7 @@ void test_vloxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_m( @@ -641,7 +641,7 @@ void test_vloxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_m( @@ -654,7 +654,7 @@ void test_vloxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_m( @@ -667,7 +667,7 @@ void test_vloxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_m( @@ -680,7 +680,7 @@ void test_vloxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_m( @@ -693,7 +693,7 @@ void test_vloxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_m( @@ -706,7 +706,7 @@ void test_vloxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { - return 
vloxseg2ei32_v_f32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_m( @@ -719,7 +719,7 @@ void test_vloxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_m( @@ -732,7 +732,7 @@ void test_vloxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_m( @@ -745,7 +745,7 @@ void test_vloxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_m( @@ -758,7 +758,7 @@ void test_vloxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei32_v_i8mf8_m( @@ -771,7 +771,7 @@ void test_vloxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_m( @@ -784,7 +784,7 @@ void test_vloxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_m( @@ -797,7 +797,7 @@ void test_vloxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_m( @@ -810,7 +810,7 @@ void test_vloxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_m( @@ -823,7 +823,7 @@ void test_vloxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, con // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_m( @@ -836,7 +836,7 @@ void test_vloxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, con // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_m( @@ -849,7 +849,7 @@ void test_vloxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_m( @@ -862,7 +862,7 @@ void test_vloxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_m( @@ -875,7 +875,7 @@ void test_vloxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return 
vloxseg2ei32_v_i16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_m( @@ -888,7 +888,7 @@ void test_vloxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_m( @@ -901,7 +901,7 @@ void test_vloxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_m( @@ -914,7 +914,7 @@ void test_vloxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_m( @@ -927,7 +927,7 @@ void test_vloxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei32_v_i32m4_m( @@ -940,7 +940,7 @@ void test_vloxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_m( @@ -953,7 +953,7 @@ void test_vloxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_m( @@ -966,7 +966,7 @@ void test_vloxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_m( @@ -979,7 +979,7 @@ void test_vloxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_m( @@ -992,7 +992,7 @@ void test_vloxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // 
void test_vloxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_m( @@ -1005,7 +1005,7 @@ void test_vloxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_m( @@ -1018,7 +1018,7 @@ void test_vloxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_m( @@ -1031,7 +1031,7 @@ void test_vloxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_m( @@ -1044,7 +1044,7 @@ void test_vloxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, c // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return 
vloxseg2ei32_v_u8m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_m( @@ -1057,7 +1057,7 @@ void test_vloxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, c // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_m( @@ -1070,7 +1070,7 @@ void test_vloxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_m( @@ -1083,7 +1083,7 @@ void test_vloxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_m( @@ -1096,7 +1096,7 @@ void test_vloxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_m( @@ -1109,7 +1109,7 @@ void test_vloxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_m( @@ -1122,7 +1122,7 @@ void test_vloxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_m( @@ -1135,7 +1135,7 @@ void test_vloxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_m( @@ -1148,7 +1148,7 @@ void test_vloxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_m( @@ -1161,7 +1161,7 @@ void test_vloxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, 
vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_m( @@ -1174,7 +1174,7 @@ void test_vloxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_m( @@ -1187,7 +1187,7 @@ void test_vloxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_m( @@ -1200,6 +1200,6 @@ void test_vloxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c index 22571eb87933..936b5a4f9956 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei64.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2( @@ -30,7 +30,7 @@ void test_vloxseg2ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1( @@ -43,7 +43,7 @@ void test_vloxseg2ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2( @@ -56,7 +56,7 @@ void test_vloxseg2ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2( @@ -69,7 +69,7 @@ void test_vloxseg2ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f32mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1( @@ -82,7 +82,7 @@ void test_vloxseg2ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const floa // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2( @@ -95,7 +95,7 @@ void test_vloxseg2ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4( @@ -108,7 +108,7 @@ void test_vloxseg2ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1( @@ -121,7 +121,7 @@ void test_vloxseg2ei64_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2( @@ -134,7 
+134,7 @@ void test_vloxseg2ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4( @@ -147,7 +147,7 @@ void test_vloxseg2ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8( @@ -160,7 +160,7 @@ void test_vloxseg2ei64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf8(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4( @@ -173,7 +173,7 @@ void test_vloxseg2ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2( @@ -186,7 +186,7 @@ void test_vloxseg2ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf2(v0, v1, base, bindex, vl); + return 
__riscv_vloxseg2ei64_v_i8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1( @@ -199,7 +199,7 @@ void test_vloxseg2ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i8m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4( @@ -212,7 +212,7 @@ void test_vloxseg2ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2( @@ -225,7 +225,7 @@ void test_vloxseg2ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1( @@ -238,7 +238,7 @@ void test_vloxseg2ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2( @@ -251,7 +251,7 @@ void test_vloxseg2ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, 
const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2( @@ -264,7 +264,7 @@ void test_vloxseg2ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i32mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1( @@ -277,7 +277,7 @@ void test_vloxseg2ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2( @@ -290,7 +290,7 @@ void test_vloxseg2ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4( @@ -303,7 +303,7 @@ void test_vloxseg2ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1( @@ -316,7 +316,7 @@ void test_vloxseg2ei64_v_i32m4(vint32m4_t *v0, 
vint32m4_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2( @@ -329,7 +329,7 @@ void test_vloxseg2ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4( @@ -342,7 +342,7 @@ void test_vloxseg2ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8( @@ -355,7 +355,7 @@ void test_vloxseg2ei64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf8(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4( @@ -368,7 +368,7 @@ void test_vloxseg2ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf4(v0, v1, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2( @@ -381,7 +381,7 @@ void test_vloxseg2ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1( @@ -394,7 +394,7 @@ void test_vloxseg2ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u8m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4( @@ -407,7 +407,7 @@ void test_vloxseg2ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2( @@ -420,7 +420,7 @@ void test_vloxseg2ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1( @@ -433,7 +433,7 @@ void test_vloxseg2ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint64m4_t 
bindex, size_t vl) { - return vloxseg2ei64_v_u16m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2( @@ -446,7 +446,7 @@ void test_vloxseg2ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2( @@ -459,7 +459,7 @@ void test_vloxseg2ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u32mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1( @@ -472,7 +472,7 @@ void test_vloxseg2ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2( @@ -485,7 +485,7 @@ void test_vloxseg2ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4( @@ -498,7 +498,7 @@ void test_vloxseg2ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const 
uint32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1( @@ -511,7 +511,7 @@ void test_vloxseg2ei64_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2( @@ -524,7 +524,7 @@ void test_vloxseg2ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4( @@ -537,7 +537,7 @@ void test_vloxseg2ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_m( @@ -550,7 +550,7 @@ void test_vloxseg2ei64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16mf4_m(v0, 
v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_m( @@ -563,7 +563,7 @@ void test_vloxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_m( @@ -576,7 +576,7 @@ void test_vloxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_m( @@ -589,7 +589,7 @@ void test_vloxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_m( @@ -602,7 +602,7 @@ void test_vloxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_m( @@ -615,7 +615,7 @@ void 
test_vloxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_m( @@ -628,7 +628,7 @@ void test_vloxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_m( @@ -641,7 +641,7 @@ void test_vloxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_m( @@ -654,7 +654,7 @@ void test_vloxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_m( @@ -667,7 +667,7 @@ void test_vloxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m2_m(vfloat64m2_t 
*v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_m( @@ -680,7 +680,7 @@ void test_vloxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_m( @@ -693,7 +693,7 @@ void test_vloxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_m( @@ -706,7 +706,7 @@ void test_vloxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_m( @@ -719,7 +719,7 @@ void test_vloxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf2_m(v0, v1, mask, base, bindex, vl); + 
return __riscv_vloxseg2ei64_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_m( @@ -732,7 +732,7 @@ void test_vloxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_m( @@ -745,7 +745,7 @@ void test_vloxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, con // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_m( @@ -758,7 +758,7 @@ void test_vloxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_m( @@ -771,7 +771,7 @@ void test_vloxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_m( @@ -784,7 +784,7 @@ void 
test_vloxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_m( @@ -797,7 +797,7 @@ void test_vloxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_m( @@ -810,7 +810,7 @@ void test_vloxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_m( @@ -823,7 +823,7 @@ void test_vloxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_m( @@ -836,7 +836,7 @@ void test_vloxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m4_m(vint32m4_t *v0, 
vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_m( @@ -849,7 +849,7 @@ void test_vloxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_m( @@ -862,7 +862,7 @@ void test_vloxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_m( @@ -875,7 +875,7 @@ void test_vloxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_m( @@ -888,7 +888,7 @@ void test_vloxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf8_m(v0, v1, mask, base, bindex, vl); + return 
__riscv_vloxseg2ei64_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_m( @@ -901,7 +901,7 @@ void test_vloxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_m( @@ -914,7 +914,7 @@ void test_vloxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_m( @@ -927,7 +927,7 @@ void test_vloxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_m( @@ -940,7 +940,7 @@ void test_vloxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, c // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_m( @@ -953,7 +953,7 @@ void 
test_vloxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_m( @@ -966,7 +966,7 @@ void test_vloxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_m( @@ -979,7 +979,7 @@ void test_vloxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_m( @@ -992,7 +992,7 @@ void test_vloxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_m( @@ -1005,7 +1005,7 @@ void test_vloxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_m( @@ -1018,7 +1018,7 @@ void test_vloxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_m( @@ -1031,7 +1031,7 @@ void test_vloxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_m( @@ -1044,7 +1044,7 @@ void test_vloxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_m( @@ -1057,7 +1057,7 @@ void test_vloxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return 
vloxseg2ei64_v_u64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_m( @@ -1070,6 +1070,6 @@ void test_vloxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c index 043d4df0736e..b9cfefe19fc3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg2ei8.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2( @@ -30,7 +30,7 @@ void test_vloxseg2ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Floa // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1( @@ -43,7 +43,7 @@ void test_vloxseg2ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Floa // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m1(vfloat16m1_t *v0, 
vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2( @@ -56,7 +56,7 @@ void test_vloxseg2ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4( @@ -69,7 +69,7 @@ void test_vloxseg2ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2( @@ -82,7 +82,7 @@ void test_vloxseg2ei8_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f32mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1( @@ -95,7 +95,7 @@ void test_vloxseg2ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2( @@ -108,7 +108,7 @@ void test_vloxseg2ei8_v_f32m1(vfloat32m1_t *v0, 
vfloat32m1_t *v1, const float *b // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4( @@ -121,7 +121,7 @@ void test_vloxseg2ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *b // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1( @@ -134,7 +134,7 @@ void test_vloxseg2ei8_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *b // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2( @@ -147,7 +147,7 @@ void test_vloxseg2ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4( @@ -160,7 +160,7 @@ void test_vloxseg2ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m4(v0, v1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8( @@ -173,7 +173,7 @@ void test_vloxseg2ei8_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf8(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4( @@ -186,7 +186,7 @@ void test_vloxseg2ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2( @@ -199,7 +199,7 @@ void test_vloxseg2ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1( @@ -212,7 +212,7 @@ void test_vloxseg2ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2( @@ -225,7 +225,7 @@ void test_vloxseg2ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m2(v0, v1, base, 
bindex, vl); + return __riscv_vloxseg2ei8_v_i8m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4( @@ -238,7 +238,7 @@ void test_vloxseg2ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4( @@ -251,7 +251,7 @@ void test_vloxseg2ei8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2( @@ -264,7 +264,7 @@ void test_vloxseg2ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1( @@ -277,7 +277,7 @@ void test_vloxseg2ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2( @@ -290,7 +290,7 @@ void test_vloxseg2ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m2(vint16m2_t *v0, vint16m2_t 
*v1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4( @@ -303,7 +303,7 @@ void test_vloxseg2ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2( @@ -316,7 +316,7 @@ void test_vloxseg2ei8_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i32mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1( @@ -329,7 +329,7 @@ void test_vloxseg2ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2( @@ -342,7 +342,7 @@ void test_vloxseg2ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4( @@ -355,7 +355,7 @@ void test_vloxseg2ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const 
int32_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1( @@ -368,7 +368,7 @@ void test_vloxseg2ei8_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2( @@ -381,7 +381,7 @@ void test_vloxseg2ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4( @@ -394,7 +394,7 @@ void test_vloxseg2ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8( @@ -407,7 +407,7 @@ void test_vloxseg2ei8_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *bas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf8(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_u8mf4( @@ -420,7 +420,7 @@ void test_vloxseg2ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *b // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2( @@ -433,7 +433,7 @@ void test_vloxseg2ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *b // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1( @@ -446,7 +446,7 @@ void test_vloxseg2ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *b // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2( @@ -459,7 +459,7 @@ void test_vloxseg2ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4( @@ -472,7 +472,7 @@ void test_vloxseg2ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m4(v0, v1, base, bindex, vl); + 
return __riscv_vloxseg2ei8_v_u8m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4( @@ -485,7 +485,7 @@ void test_vloxseg2ei8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2( @@ -498,7 +498,7 @@ void test_vloxseg2ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1( @@ -511,7 +511,7 @@ void test_vloxseg2ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2( @@ -524,7 +524,7 @@ void test_vloxseg2ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4( @@ -537,7 +537,7 @@ void test_vloxseg2ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m4(vuint16m4_t *v0, 
vuint16m4_t *v1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2( @@ -550,7 +550,7 @@ void test_vloxseg2ei8_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u32mf2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1( @@ -563,7 +563,7 @@ void test_vloxseg2ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2( @@ -576,7 +576,7 @@ void test_vloxseg2ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4( @@ -589,7 +589,7 @@ void test_vloxseg2ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1( @@ -602,7 +602,7 @@ void test_vloxseg2ei8_v_u32m4(vuint32m4_t 
*v0, vuint32m4_t *v1, const uint32_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m1(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2( @@ -615,7 +615,7 @@ void test_vloxseg2ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m2(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4( @@ -628,7 +628,7 @@ void test_vloxseg2ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m4(v0, v1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_m( @@ -641,7 +641,7 @@ void test_vloxseg2ei8_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_m( @@ -654,7 +654,7 @@ void test_vloxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf2_m(v0, v1, mask, base, 
bindex, vl); + return __riscv_vloxseg2ei8_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_m( @@ -667,7 +667,7 @@ void test_vloxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_m( @@ -680,7 +680,7 @@ void test_vloxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_m( @@ -693,7 +693,7 @@ void test_vloxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_m( @@ -706,7 +706,7 @@ void test_vloxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_m( @@ -719,7 +719,7 @@ 
void test_vloxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_m( @@ -732,7 +732,7 @@ void test_vloxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_m( @@ -745,7 +745,7 @@ void test_vloxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_m( @@ -758,7 +758,7 @@ void test_vloxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_m( @@ -771,7 +771,7 @@ void test_vloxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, 
vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_m( @@ -784,7 +784,7 @@ void test_vloxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_m( @@ -797,7 +797,7 @@ void test_vloxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_m( @@ -810,7 +810,7 @@ void test_vloxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_m( @@ -823,7 +823,7 @@ void test_vloxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf2_m(v0, v1, mask, base, bindex, vl); + return 
__riscv_vloxseg2ei8_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_m( @@ -836,7 +836,7 @@ void test_vloxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_m( @@ -849,7 +849,7 @@ void test_vloxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, cons // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_m( @@ -862,7 +862,7 @@ void test_vloxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, cons // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_m( @@ -875,7 +875,7 @@ void test_vloxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, cons // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_m( @@ -888,7 +888,7 @@ void test_vloxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t 
*v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_m( @@ -901,7 +901,7 @@ void test_vloxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_m( @@ -914,7 +914,7 @@ void test_vloxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_m( @@ -927,7 +927,7 @@ void test_vloxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, c // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_m( @@ -940,7 +940,7 @@ void test_vloxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, c // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, 
size_t vl) { - return vloxseg2ei8_v_i32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_m( @@ -953,7 +953,7 @@ void test_vloxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_m( @@ -966,7 +966,7 @@ void test_vloxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_m( @@ -979,7 +979,7 @@ void test_vloxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_m( @@ -992,7 +992,7 @@ void test_vloxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, c // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_i64m2_m( @@ -1005,7 +1005,7 @@ void test_vloxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_m( @@ -1018,7 +1018,7 @@ void test_vloxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_m( @@ -1031,7 +1031,7 @@ void test_vloxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_m( @@ -1044,7 +1044,7 @@ void test_vloxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_m( @@ -1057,7 +1057,7 @@ void test_vloxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_m( @@ -1070,7 +1070,7 @@ void test_vloxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_m( @@ -1083,7 +1083,7 @@ void test_vloxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, co // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_m( @@ -1096,7 +1096,7 @@ void test_vloxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, co // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_m( @@ -1109,7 +1109,7 @@ void test_vloxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, co // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf4_m(v0, v1, mask, base, 
bindex, vl); + return __riscv_vloxseg2ei8_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_m( @@ -1122,7 +1122,7 @@ void test_vloxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_m( @@ -1135,7 +1135,7 @@ void test_vloxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_m( @@ -1148,7 +1148,7 @@ void test_vloxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_m( @@ -1161,7 +1161,7 @@ void test_vloxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_m( @@ -1174,7 +1174,7 
@@ void test_vloxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_m( @@ -1187,7 +1187,7 @@ void test_vloxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_m( @@ -1200,7 +1200,7 @@ void test_vloxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_m( @@ -1213,7 +1213,7 @@ void test_vloxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_m( @@ -1226,7 +1226,7 @@ void test_vloxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_m( @@ -1239,7 +1239,7 @@ void test_vloxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_m( @@ -1252,6 +1252,6 @@ void test_vloxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c index 5538d6331eeb..d6b6a85f47e5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei16.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei16_v_f16mf2( @@ -34,7 +34,7 @@ void test_vloxseg3ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1( @@ -49,7 +49,7 @@ void test_vloxseg3ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2( @@ -64,7 +64,7 @@ void test_vloxseg3ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2( @@ -79,7 +79,7 @@ void test_vloxseg3ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1( @@ -94,7 +94,7 @@ void test_vloxseg3ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2( @@ -109,7 +109,7 @@ void test_vloxseg3ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1( @@ -124,7 +124,7 @@ void test_vloxseg3ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2( @@ -139,7 +139,7 @@ void test_vloxseg3ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8( @@ -154,7 +154,7 @@ void test_vloxseg3ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf8(v0, v1, v2, base, bindex, vl); + 
return __riscv_vloxseg3ei16_v_i8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4( @@ -169,7 +169,7 @@ void test_vloxseg3ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2( @@ -184,7 +184,7 @@ void test_vloxseg3ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1( @@ -199,7 +199,7 @@ void test_vloxseg3ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2( @@ -214,7 +214,7 @@ void test_vloxseg3ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4( @@ -229,7 +229,7 @@ void test_vloxseg3ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const // 
CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2( @@ -244,7 +244,7 @@ void test_vloxseg3ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1( @@ -259,7 +259,7 @@ void test_vloxseg3ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2( @@ -274,7 +274,7 @@ void test_vloxseg3ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2( @@ -289,7 +289,7 @@ void test_vloxseg3ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg3ei16_v_i32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1( @@ -304,7 +304,7 @@ void test_vloxseg3ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2( @@ -319,7 +319,7 @@ void test_vloxseg3ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1( @@ -334,7 +334,7 @@ void test_vloxseg3ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2( @@ -349,7 +349,7 @@ void test_vloxseg3ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8( @@ -364,7 +364,7 @@ void 
test_vloxseg3ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4( @@ -379,7 +379,7 @@ void test_vloxseg3ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2( @@ -394,7 +394,7 @@ void test_vloxseg3ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1( @@ -409,7 +409,7 @@ void test_vloxseg3ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2( @@ -424,7 +424,7 @@ void test_vloxseg3ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t 
*base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4( @@ -439,7 +439,7 @@ void test_vloxseg3ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2( @@ -454,7 +454,7 @@ void test_vloxseg3ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1( @@ -469,7 +469,7 @@ void test_vloxseg3ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2( @@ -484,7 +484,7 @@ void test_vloxseg3ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16m2(v0, v1, v2, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2( @@ -499,7 +499,7 @@ void test_vloxseg3ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1( @@ -514,7 +514,7 @@ void test_vloxseg3ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2( @@ -529,7 +529,7 @@ void test_vloxseg3ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1( @@ -544,7 +544,7 @@ void test_vloxseg3ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2( @@ -559,7 +559,7 @@ void test_vloxseg3ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_m( @@ -574,7 +574,7 @@ void test_vloxseg3ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_m( @@ -589,7 +589,7 @@ void test_vloxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_m( @@ -604,7 +604,7 @@ void test_vloxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_m( @@ -619,7 +619,7 @@ void test_vloxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t 
*v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_m( @@ -634,7 +634,7 @@ void test_vloxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_m( @@ -649,7 +649,7 @@ void test_vloxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_m( @@ -664,7 +664,7 @@ void test_vloxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_m( @@ -679,7 +679,7 @@ void test_vloxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, 
const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_m( @@ -694,7 +694,7 @@ void test_vloxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_m( @@ -709,7 +709,7 @@ void test_vloxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_m( @@ -724,7 +724,7 @@ void test_vloxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_m( @@ -739,7 +739,7 @@ void test_vloxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return 
vloxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_m( @@ -754,7 +754,7 @@ void test_vloxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_m( @@ -769,7 +769,7 @@ void test_vloxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_m( @@ -784,7 +784,7 @@ void test_vloxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_m( @@ -799,7 +799,7 @@ void test_vloxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return 
__riscv_vloxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_m( @@ -814,7 +814,7 @@ void test_vloxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_m( @@ -829,7 +829,7 @@ void test_vloxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_m( @@ -844,7 +844,7 @@ void test_vloxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_m( @@ -859,7 +859,7 @@ void test_vloxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32m1_m(v0, v1, v2, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_m( @@ -874,7 +874,7 @@ void test_vloxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_m( @@ -889,7 +889,7 @@ void test_vloxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_m( @@ -904,7 +904,7 @@ void test_vloxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_m( @@ -919,7 +919,7 @@ void test_vloxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei16_v_u8mf4_m( @@ -934,7 +934,7 @@ void test_vloxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vloxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_m( @@ -964,7 +964,7 @@ void test_vloxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_m( @@ -979,7 +979,7 @@ void test_vloxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_m( @@ -994,7 +994,7 @@ void 
test_vloxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_m( @@ -1009,7 +1009,7 @@ void test_vloxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_m( @@ -1024,7 +1024,7 @@ void test_vloxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_m( @@ -1039,7 +1039,7 @@ void test_vloxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void 
test_vloxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_m( @@ -1069,7 +1069,7 @@ void test_vloxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_m( @@ -1084,7 +1084,7 @@ void test_vloxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_m( @@ -1099,7 +1099,7 @@ void test_vloxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_m( @@ -1114,6 +1114,6 @@ void 
test_vloxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c index d99d86617d50..cfe347d66b70 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei32.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2( @@ -34,7 +34,7 @@ void test_vloxseg3ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1( @@ -49,7 +49,7 @@ void test_vloxseg3ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return 
vloxseg3ei32_v_f16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2( @@ -64,7 +64,7 @@ void test_vloxseg3ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2( @@ -79,7 +79,7 @@ void test_vloxseg3ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1( @@ -94,7 +94,7 @@ void test_vloxseg3ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2( @@ -109,7 +109,7 @@ void test_vloxseg3ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1( @@ -124,7 +124,7 @@ 
void test_vloxseg3ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2( @@ -139,7 +139,7 @@ void test_vloxseg3ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8( @@ -154,7 +154,7 @@ void test_vloxseg3ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4( @@ -169,7 +169,7 @@ void test_vloxseg3ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2( @@ -184,7 +184,7 @@ void test_vloxseg3ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t 
*base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1( @@ -199,7 +199,7 @@ void test_vloxseg3ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2( @@ -214,7 +214,7 @@ void test_vloxseg3ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4( @@ -229,7 +229,7 @@ void test_vloxseg3ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2( @@ -244,7 +244,7 @@ void test_vloxseg3ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei32_v_i16m1( @@ -259,7 +259,7 @@ void test_vloxseg3ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2( @@ -274,7 +274,7 @@ void test_vloxseg3ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2( @@ -289,7 +289,7 @@ void test_vloxseg3ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1( @@ -304,7 +304,7 @@ void test_vloxseg3ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2( @@ -319,7 +319,7 @@ void test_vloxseg3ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32m2(vint32m2_t 
*v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1( @@ -334,7 +334,7 @@ void test_vloxseg3ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2( @@ -349,7 +349,7 @@ void test_vloxseg3ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8( @@ -364,7 +364,7 @@ void test_vloxseg3ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4( @@ -379,7 +379,7 @@ void test_vloxseg3ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf4(v0, v1, v2, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2( @@ -394,7 +394,7 @@ void test_vloxseg3ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1( @@ -409,7 +409,7 @@ void test_vloxseg3ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2( @@ -424,7 +424,7 @@ void test_vloxseg3ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4( @@ -439,7 +439,7 @@ void test_vloxseg3ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2( @@ -454,7 +454,7 @@ void test_vloxseg3ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // 
void test_vloxseg3ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1( @@ -469,7 +469,7 @@ void test_vloxseg3ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2( @@ -484,7 +484,7 @@ void test_vloxseg3ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2( @@ -499,7 +499,7 @@ void test_vloxseg3ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1( @@ -514,7 +514,7 @@ void test_vloxseg3ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m1(v0, v1, v2, 
base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2( @@ -529,7 +529,7 @@ void test_vloxseg3ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1( @@ -544,7 +544,7 @@ void test_vloxseg3ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2( @@ -559,7 +559,7 @@ void test_vloxseg3ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_m( @@ -574,7 +574,7 @@ void test_vloxseg3ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_m( @@ 
-589,7 +589,7 @@ void test_vloxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_m( @@ -604,7 +604,7 @@ void test_vloxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_m( @@ -619,7 +619,7 @@ void test_vloxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_m( @@ -634,7 +634,7 @@ void test_vloxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_m( @@ -649,7 +649,7 @@ void 
test_vloxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_m( @@ -664,7 +664,7 @@ void test_vloxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_m( @@ -679,7 +679,7 @@ void test_vloxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_m( @@ -694,7 +694,7 @@ void test_vloxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_m( @@ -709,7 +709,7 @@ void test_vloxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, 
vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_m( @@ -724,7 +724,7 @@ void test_vloxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_m( @@ -739,7 +739,7 @@ void test_vloxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_m( @@ -754,7 +754,7 @@ void test_vloxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_m( @@ -769,7 +769,7 @@ void test_vloxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_m( @@ -784,7 +784,7 @@ void test_vloxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_m( @@ -799,7 +799,7 @@ void test_vloxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_m( @@ -814,7 +814,7 @@ void test_vloxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_m( @@ -829,7 +829,7 @@ void test_vloxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, 
vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_m( @@ -844,7 +844,7 @@ void test_vloxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_m( @@ -859,7 +859,7 @@ void test_vloxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_m( @@ -874,7 +874,7 @@ void test_vloxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_m( @@ -889,7 +889,7 @@ void test_vloxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, 
vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_m( @@ -904,7 +904,7 @@ void test_vloxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_m( @@ -919,7 +919,7 @@ void test_vloxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_m( @@ -934,7 +934,7 @@ void test_vloxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vloxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return 
vloxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_m( @@ -964,7 +964,7 @@ void test_vloxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_m( @@ -979,7 +979,7 @@ void test_vloxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_m( @@ -994,7 +994,7 @@ void test_vloxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_m( @@ -1009,7 +1009,7 @@ void test_vloxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, base, bindex, 
vl); + return __riscv_vloxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_m( @@ -1024,7 +1024,7 @@ void test_vloxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_m( @@ -1039,7 +1039,7 @@ void test_vloxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void test_vloxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_m( @@ -1069,7 +1069,7 @@ void test_vloxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); + return 
__riscv_vloxseg3ei32_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_m( @@ -1084,7 +1084,7 @@ void test_vloxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_m( @@ -1099,7 +1099,7 @@ void test_vloxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_m( @@ -1114,6 +1114,6 @@ void test_vloxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c index 969228760475..8e5c4bba7e8a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei64.c @@ -19,7 +19,7 
@@ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2( @@ -34,7 +34,7 @@ void test_vloxseg3ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1( @@ -49,7 +49,7 @@ void test_vloxseg3ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2( @@ -64,7 +64,7 @@ void test_vloxseg3ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2( @@ -79,7 +79,7 @@ void test_vloxseg3ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) { - 
return vloxseg3ei64_v_f32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1( @@ -94,7 +94,7 @@ void test_vloxseg3ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2( @@ -109,7 +109,7 @@ void test_vloxseg3ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1( @@ -124,7 +124,7 @@ void test_vloxseg3ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2( @@ -139,7 +139,7 @@ void test_vloxseg3ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8( @@ -154,7 +154,7 
@@ void test_vloxseg3ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4( @@ -169,7 +169,7 @@ void test_vloxseg3ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2( @@ -184,7 +184,7 @@ void test_vloxseg3ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1( @@ -199,7 +199,7 @@ void test_vloxseg3ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4( @@ -214,7 +214,7 @@ void test_vloxseg3ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, 
vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2( @@ -229,7 +229,7 @@ void test_vloxseg3ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1( @@ -244,7 +244,7 @@ void test_vloxseg3ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2( @@ -259,7 +259,7 @@ void test_vloxseg3ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2( @@ -274,7 +274,7 @@ void test_vloxseg3ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei64_v_i32m1( @@ -289,7 +289,7 @@ void test_vloxseg3ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2( @@ -304,7 +304,7 @@ void test_vloxseg3ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1( @@ -319,7 +319,7 @@ void test_vloxseg3ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2( @@ -334,7 +334,7 @@ void test_vloxseg3ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8( @@ -349,7 +349,7 @@ void test_vloxseg3ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf8(vuint8mf8_t *v0, 
vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4( @@ -364,7 +364,7 @@ void test_vloxseg3ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2( @@ -379,7 +379,7 @@ void test_vloxseg3ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1( @@ -394,7 +394,7 @@ void test_vloxseg3ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4( @@ -409,7 +409,7 @@ void test_vloxseg3ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16mf4(v0, v1, v2, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2( @@ -424,7 +424,7 @@ void test_vloxseg3ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1( @@ -439,7 +439,7 @@ void test_vloxseg3ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2( @@ -454,7 +454,7 @@ void test_vloxseg3ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2( @@ -469,7 +469,7 @@ void test_vloxseg3ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1( @@ -484,7 +484,7 @@ void test_vloxseg3ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // 
CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2( @@ -499,7 +499,7 @@ void test_vloxseg3ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1( @@ -514,7 +514,7 @@ void test_vloxseg3ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2( @@ -529,7 +529,7 @@ void test_vloxseg3ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_m( @@ -544,7 +544,7 @@ void test_vloxseg3ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - 
return vloxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_m( @@ -559,7 +559,7 @@ void test_vloxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_m( @@ -574,7 +574,7 @@ void test_vloxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_m( @@ -589,7 +589,7 @@ void test_vloxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_m( @@ -604,7 +604,7 @@ void test_vloxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return 
vloxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_m( @@ -619,7 +619,7 @@ void test_vloxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_m( @@ -634,7 +634,7 @@ void test_vloxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_m( @@ -649,7 +649,7 @@ void test_vloxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_m( @@ -664,7 +664,7 @@ void test_vloxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m2_m(v0, v1, v2, mask, base, 
bindex, vl); + return __riscv_vloxseg3ei64_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_m( @@ -679,7 +679,7 @@ void test_vloxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_m( @@ -694,7 +694,7 @@ void test_vloxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_m( @@ -709,7 +709,7 @@ void test_vloxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_m( @@ -724,7 +724,7 @@ void test_vloxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8m1_m(v0, v1, v2, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_m( @@ -739,7 +739,7 @@ void test_vloxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_m( @@ -754,7 +754,7 @@ void test_vloxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_m( @@ -769,7 +769,7 @@ void test_vloxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_m( @@ -784,7 +784,7 @@ void test_vloxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei64_v_i32mf2_m( @@ -799,7 +799,7 @@ void test_vloxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_m( @@ -814,7 +814,7 @@ void test_vloxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_m( @@ -829,7 +829,7 @@ void test_vloxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_m( @@ -844,7 +844,7 @@ void test_vloxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_m( @@ -859,7 +859,7 @@ void 
test_vloxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_m( @@ -874,7 +874,7 @@ void test_vloxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_m( @@ -889,7 +889,7 @@ void test_vloxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_m( @@ -904,7 +904,7 @@ void test_vloxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_m( @@ -919,7 +919,7 @@ void test_vloxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t 
*v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_m( @@ -934,7 +934,7 @@ void test_vloxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_m( @@ -949,7 +949,7 @@ void test_vloxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_m( @@ -964,7 +964,7 @@ void test_vloxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_m( @@ -979,7 +979,7 @@ void test_vloxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret 
void // void test_vloxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_m( @@ -994,7 +994,7 @@ void test_vloxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_m( @@ -1009,7 +1009,7 @@ void test_vloxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_m( @@ -1024,7 +1024,7 @@ void test_vloxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_m( @@ -1039,7 +1039,7 @@ void test_vloxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_m( @@ -1054,6 +1054,6 @@ void test_vloxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c index 0737d0b41ae2..513a44dfd6d8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg3ei8.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2( @@ -34,7 +34,7 @@ void test_vloxseg3ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16mf2(v0, v1, v2, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1( @@ -49,7 +49,7 @@ void test_vloxseg3ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2( @@ -64,7 +64,7 @@ void test_vloxseg3ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2( @@ -79,7 +79,7 @@ void test_vloxseg3ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1( @@ -94,7 +94,7 @@ void test_vloxseg3ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2( @@ -109,7 +109,7 @@ void test_vloxseg3ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1( @@ -124,7 +124,7 @@ void test_vloxseg3ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2( @@ -139,7 +139,7 @@ void test_vloxseg3ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8( @@ -154,7 +154,7 @@ void test_vloxseg3ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4( @@ -169,7 +169,7 @@ void test_vloxseg3ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf4(v0, v1, v2, base, bindex, vl); + return 
__riscv_vloxseg3ei8_v_i8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2( @@ -184,7 +184,7 @@ void test_vloxseg3ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1( @@ -199,7 +199,7 @@ void test_vloxseg3ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2( @@ -214,7 +214,7 @@ void test_vloxseg3ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4( @@ -229,7 +229,7 @@ void test_vloxseg3ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2( @@ -244,7 +244,7 @@ void test_vloxseg3ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void 
// void test_vloxseg3ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1( @@ -259,7 +259,7 @@ void test_vloxseg3ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2( @@ -274,7 +274,7 @@ void test_vloxseg3ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2( @@ -289,7 +289,7 @@ void test_vloxseg3ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1( @@ -304,7 +304,7 @@ void test_vloxseg3ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m1(v0, v1, v2, base, bindex, vl); + return 
__riscv_vloxseg3ei8_v_i32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2( @@ -319,7 +319,7 @@ void test_vloxseg3ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1( @@ -334,7 +334,7 @@ void test_vloxseg3ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2( @@ -349,7 +349,7 @@ void test_vloxseg3ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8( @@ -364,7 +364,7 @@ void test_vloxseg3ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4( @@ -379,7 +379,7 @@ void test_vloxseg3ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // 
CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2( @@ -394,7 +394,7 @@ void test_vloxseg3ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1( @@ -409,7 +409,7 @@ void test_vloxseg3ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2( @@ -424,7 +424,7 @@ void test_vloxseg3ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, con // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4( @@ -439,7 +439,7 @@ void test_vloxseg3ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, con // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf4(v0, v1, v2, base, bindex, vl); 
+ return __riscv_vloxseg3ei8_v_u16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2( @@ -454,7 +454,7 @@ void test_vloxseg3ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1( @@ -469,7 +469,7 @@ void test_vloxseg3ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2( @@ -484,7 +484,7 @@ void test_vloxseg3ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2( @@ -499,7 +499,7 @@ void test_vloxseg3ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1( @@ -514,7 +514,7 @@ void test_vloxseg3ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t 
*v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2( @@ -529,7 +529,7 @@ void test_vloxseg3ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1( @@ -544,7 +544,7 @@ void test_vloxseg3ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2( @@ -559,7 +559,7 @@ void test_vloxseg3ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_m( @@ -574,7 +574,7 @@ void test_vloxseg3ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) 
{ - return vloxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_m( @@ -589,7 +589,7 @@ void test_vloxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_m( @@ -604,7 +604,7 @@ void test_vloxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_m( @@ -619,7 +619,7 @@ void test_vloxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_m( @@ -634,7 +634,7 @@ void test_vloxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f32mf2_m(v0, v1, v2, 
mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_m( @@ -649,7 +649,7 @@ void test_vloxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_m( @@ -664,7 +664,7 @@ void test_vloxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_m( @@ -679,7 +679,7 @@ void test_vloxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_m( @@ -694,7 +694,7 @@ void test_vloxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); + return 
__riscv_vloxseg3ei8_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_m( @@ -709,7 +709,7 @@ void test_vloxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_m( @@ -724,7 +724,7 @@ void test_vloxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_m( @@ -739,7 +739,7 @@ void test_vloxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_m( @@ -754,7 +754,7 @@ void test_vloxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei8_v_i8m2_m( @@ -769,7 +769,7 @@ void test_vloxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vboo // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_m( @@ -784,7 +784,7 @@ void test_vloxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vboo // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_m( @@ -799,7 +799,7 @@ void test_vloxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_m( @@ -814,7 +814,7 @@ void test_vloxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_m( @@ -829,7 +829,7 @@ void 
test_vloxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_m( @@ -844,7 +844,7 @@ void test_vloxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_m( @@ -859,7 +859,7 @@ void test_vloxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_m( @@ -874,7 +874,7 @@ void test_vloxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_m( @@ -889,7 +889,7 @@ void test_vloxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // 
CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_m( @@ -904,7 +904,7 @@ void test_vloxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_m( @@ -919,7 +919,7 @@ void test_vloxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_m( @@ -934,7 +934,7 @@ void test_vloxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vloxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, 
vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_m( @@ -964,7 +964,7 @@ void test_vloxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_m( @@ -979,7 +979,7 @@ void test_vloxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_m( @@ -994,7 +994,7 @@ void test_vloxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_m( @@ -1009,7 +1009,7 @@ void test_vloxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, 
vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_m( @@ -1024,7 +1024,7 @@ void test_vloxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_m( @@ -1039,7 +1039,7 @@ void test_vloxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void test_vloxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_m( @@ -1069,7 +1069,7 @@ void test_vloxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return 
vloxseg3ei8_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_m( @@ -1084,7 +1084,7 @@ void test_vloxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_m( @@ -1099,7 +1099,7 @@ void test_vloxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_m( @@ -1114,6 +1114,6 @@ void test_vloxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c index 7adc0a0650f7..ea1a7817cf92 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei16.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2( @@ -38,7 +38,7 @@ void test_vloxseg4ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1( @@ -55,7 +55,7 @@ void test_vloxseg4ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2( @@ -72,7 +72,7 @@ void test_vloxseg4ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2( @@ -89,7 +89,7 @@ void 
test_vloxseg4ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1( @@ -106,7 +106,7 @@ void test_vloxseg4ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2( @@ -123,7 +123,7 @@ void test_vloxseg4ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1( @@ -140,7 +140,7 @@ void test_vloxseg4ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2( @@ -157,7 +157,7 @@ void test_vloxseg4ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // 
CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8( @@ -174,7 +174,7 @@ void test_vloxseg4ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4( @@ -191,7 +191,7 @@ void test_vloxseg4ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2( @@ -208,7 +208,7 @@ void test_vloxseg4ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1( @@ -225,7 +225,7 @@ void test_vloxseg4ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, 
vint8m1_t *v3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2( @@ -242,7 +242,7 @@ void test_vloxseg4ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4( @@ -259,7 +259,7 @@ void test_vloxseg4ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2( @@ -276,7 +276,7 @@ void test_vloxseg4ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1( @@ -293,7 +293,7 @@ void test_vloxseg4ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m1(v0, v1, 
v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2( @@ -310,7 +310,7 @@ void test_vloxseg4ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2( @@ -327,7 +327,7 @@ void test_vloxseg4ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1( @@ -344,7 +344,7 @@ void test_vloxseg4ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2( @@ -361,7 +361,7 @@ void test_vloxseg4ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32m2(v0, v1, v2, v3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1( @@ -378,7 +378,7 @@ void test_vloxseg4ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2( @@ -395,7 +395,7 @@ void test_vloxseg4ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8( @@ -412,7 +412,7 @@ void test_vloxseg4ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4( @@ -429,7 +429,7 @@ void test_vloxseg4ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2( @@ -446,7 +446,7 @@ void 
test_vloxseg4ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1( @@ -463,7 +463,7 @@ void test_vloxseg4ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2( @@ -480,7 +480,7 @@ void test_vloxseg4ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4( @@ -497,7 +497,7 @@ void test_vloxseg4ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2( @@ -514,7 +514,7 @@ void test_vloxseg4ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void 
// void test_vloxseg4ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1( @@ -531,7 +531,7 @@ void test_vloxseg4ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2( @@ -548,7 +548,7 @@ void test_vloxseg4ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2( @@ -565,7 +565,7 @@ void test_vloxseg4ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1( @@ -582,7 +582,7 @@ void test_vloxseg4ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2( @@ -599,7 +599,7 @@ void test_vloxseg4ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1( @@ -616,7 +616,7 @@ void test_vloxseg4ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2( @@ -633,7 +633,7 @@ void test_vloxseg4ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_m( @@ -650,7 +650,7 @@ void test_vloxseg4ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, 
vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_m( @@ -667,7 +667,7 @@ void test_vloxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_m( @@ -684,7 +684,7 @@ void test_vloxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_m( @@ -701,7 +701,7 @@ void test_vloxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_m( @@ -718,7 +718,7 @@ void test_vloxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t 
*v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_m( @@ -735,7 +735,7 @@ void test_vloxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_m( @@ -752,7 +752,7 @@ void test_vloxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_m( @@ -769,7 +769,7 @@ void test_vloxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_m( @@ -786,7 +786,7 @@ void test_vloxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // 
void test_vloxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_m( @@ -803,7 +803,7 @@ void test_vloxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_m( @@ -820,7 +820,7 @@ void test_vloxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_m( @@ -837,7 +837,7 @@ void test_vloxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_m( @@ -854,7 +854,7 @@ void test_vloxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t 
*v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_m( @@ -871,7 +871,7 @@ void test_vloxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_m( @@ -888,7 +888,7 @@ void test_vloxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_m( @@ -905,7 +905,7 @@ void test_vloxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_m( @@ -922,7 +922,7 @@ void test_vloxseg4ei16_v_i16mf2_m(vint16mf2_t 
*v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_m( @@ -939,7 +939,7 @@ void test_vloxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_m( @@ -956,7 +956,7 @@ void test_vloxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_m( @@ -973,7 +973,7 @@ void test_vloxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_m( @@ -990,7 +990,7 @@ void 
test_vloxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_m( @@ -1007,7 +1007,7 @@ void test_vloxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_m( @@ -1024,7 +1024,7 @@ void test_vloxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_m( @@ -1041,7 +1041,7 @@ void test_vloxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei16_v_u8mf4_m( @@ -1058,7 +1058,7 @@ void test_vloxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_m( @@ -1075,7 +1075,7 @@ void test_vloxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_m( @@ -1092,7 +1092,7 @@ void test_vloxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_m( @@ -1109,7 +1109,7 @@ void test_vloxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8m2_m(v0, v1, v2, v3, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_m( @@ -1126,7 +1126,7 @@ void test_vloxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_m( @@ -1143,7 +1143,7 @@ void test_vloxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_m( @@ -1160,7 +1160,7 @@ void test_vloxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_m( @@ -1177,7 +1177,7 @@ void test_vloxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, 
vl); + return __riscv_vloxseg4ei16_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_m( @@ -1194,7 +1194,7 @@ void test_vloxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_m( @@ -1211,7 +1211,7 @@ void test_vloxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_m( @@ -1228,7 +1228,7 @@ void test_vloxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_m( @@ -1245,7 +1245,7 @@ void test_vloxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { 
- return vloxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_m( @@ -1262,6 +1262,6 @@ void test_vloxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c index e04e6638c996..8adb1477b139 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei32.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2( @@ -38,7 +38,7 @@ void test_vloxseg4ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_f16m1( @@ -55,7 +55,7 @@ void test_vloxseg4ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2( @@ -72,7 +72,7 @@ void test_vloxseg4ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2( @@ -89,7 +89,7 @@ void test_vloxseg4ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1( @@ -106,7 +106,7 @@ void test_vloxseg4ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2( @@ -123,7 +123,7 @@ void 
test_vloxseg4ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1( @@ -140,7 +140,7 @@ void test_vloxseg4ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2( @@ -157,7 +157,7 @@ void test_vloxseg4ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8( @@ -174,7 +174,7 @@ void test_vloxseg4ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4( @@ -191,7 +191,7 @@ void test_vloxseg4ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: 
ret void // void test_vloxseg4ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2( @@ -208,7 +208,7 @@ void test_vloxseg4ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1( @@ -225,7 +225,7 @@ void test_vloxseg4ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2( @@ -242,7 +242,7 @@ void test_vloxseg4ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4( @@ -259,7 +259,7 @@ void test_vloxseg4ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, 
vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2( @@ -276,7 +276,7 @@ void test_vloxseg4ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1( @@ -293,7 +293,7 @@ void test_vloxseg4ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2( @@ -310,7 +310,7 @@ void test_vloxseg4ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2( @@ -327,7 +327,7 @@ void test_vloxseg4ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32mf2(v0, v1, v2, v3, base, bindex, 
vl); + return __riscv_vloxseg4ei32_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1( @@ -344,7 +344,7 @@ void test_vloxseg4ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2( @@ -361,7 +361,7 @@ void test_vloxseg4ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1( @@ -378,7 +378,7 @@ void test_vloxseg4ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2( @@ -395,7 +395,7 @@ void test_vloxseg4ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_u8mf8( @@ -412,7 +412,7 @@ void test_vloxseg4ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4( @@ -429,7 +429,7 @@ void test_vloxseg4ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2( @@ -446,7 +446,7 @@ void test_vloxseg4ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1( @@ -463,7 +463,7 @@ void test_vloxseg4ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2( @@ -480,7 +480,7 @@ void test_vloxseg4ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t 
*v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4( @@ -497,7 +497,7 @@ void test_vloxseg4ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2( @@ -514,7 +514,7 @@ void test_vloxseg4ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1( @@ -531,7 +531,7 @@ void test_vloxseg4ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2( @@ -548,7 +548,7 @@ void test_vloxseg4ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2( @@ -565,7 +565,7 @@ void test_vloxseg4ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1( @@ -582,7 +582,7 @@ void test_vloxseg4ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2( @@ -599,7 +599,7 @@ void test_vloxseg4ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1( @@ -616,7 +616,7 @@ void test_vloxseg4ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, 
vuint64m1_t *v3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2( @@ -633,7 +633,7 @@ void test_vloxseg4ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_m( @@ -650,7 +650,7 @@ void test_vloxseg4ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_m( @@ -667,7 +667,7 @@ void test_vloxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_m( @@ -684,7 +684,7 @@ void test_vloxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, 
vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_m( @@ -701,7 +701,7 @@ void test_vloxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_m( @@ -718,7 +718,7 @@ void test_vloxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_m( @@ -735,7 +735,7 @@ void test_vloxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_m( @@ -752,7 +752,7 @@ void test_vloxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // 
void test_vloxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_m( @@ -769,7 +769,7 @@ void test_vloxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_m( @@ -786,7 +786,7 @@ void test_vloxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_m( @@ -803,7 +803,7 @@ void test_vloxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_m( @@ -820,7 +820,7 @@ void test_vloxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t 
*v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_m( @@ -837,7 +837,7 @@ void test_vloxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_m( @@ -854,7 +854,7 @@ void test_vloxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_m( @@ -871,7 +871,7 @@ void test_vloxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_m( @@ -888,7 +888,7 @@ void test_vloxseg4ei32_v_i8m2_m(vint8m2_t *v0, 
vint8m2_t *v1, vint8m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_m( @@ -905,7 +905,7 @@ void test_vloxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_m( @@ -922,7 +922,7 @@ void test_vloxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_m( @@ -939,7 +939,7 @@ void test_vloxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_m( @@ -956,7 +956,7 @@ void 
test_vloxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_m( @@ -973,7 +973,7 @@ void test_vloxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_m( @@ -990,7 +990,7 @@ void test_vloxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_m( @@ -1007,7 +1007,7 @@ void test_vloxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_i64m2_m( @@ -1024,7 +1024,7 @@ void test_vloxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_m( @@ -1041,7 +1041,7 @@ void test_vloxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_m( @@ -1058,7 +1058,7 @@ void test_vloxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_m( @@ -1075,7 +1075,7 @@ void test_vloxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf2_m(v0, v1, v2, 
v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_m( @@ -1092,7 +1092,7 @@ void test_vloxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_m( @@ -1109,7 +1109,7 @@ void test_vloxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_m( @@ -1126,7 +1126,7 @@ void test_vloxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_m( @@ -1143,7 +1143,7 @@ void test_vloxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + 
return __riscv_vloxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_m( @@ -1160,7 +1160,7 @@ void test_vloxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_m( @@ -1177,7 +1177,7 @@ void test_vloxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_m( @@ -1194,7 +1194,7 @@ void test_vloxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_m( @@ -1211,7 +1211,7 @@ void test_vloxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return 
vloxseg4ei32_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_m( @@ -1228,7 +1228,7 @@ void test_vloxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_m( @@ -1245,7 +1245,7 @@ void test_vloxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_m( @@ -1262,6 +1262,6 @@ void test_vloxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c index 72fb0d2dd12a..3d76dee2b96f 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei64.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2( @@ -38,7 +38,7 @@ void test_vloxseg4ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1( @@ -55,7 +55,7 @@ void test_vloxseg4ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2( @@ -72,7 +72,7 @@ void test_vloxseg4ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2( @@ -89,7 +89,7 @@ void test_vloxseg4ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1( @@ -106,7 +106,7 @@ void test_vloxseg4ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2( @@ -123,7 +123,7 @@ void test_vloxseg4ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1( @@ -140,7 +140,7 @@ void test_vloxseg4ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2( @@ -157,7 +157,7 @@ void 
test_vloxseg4ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8( @@ -174,7 +174,7 @@ void test_vloxseg4ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4( @@ -191,7 +191,7 @@ void test_vloxseg4ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2( @@ -208,7 +208,7 @@ void test_vloxseg4ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1( @@ -225,7 +225,7 @@ void test_vloxseg4ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4( @@ -242,7 +242,7 @@ void test_vloxseg4ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2( @@ -259,7 +259,7 @@ void test_vloxseg4ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1( @@ -276,7 +276,7 @@ void test_vloxseg4ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2( @@ -293,7 +293,7 @@ void test_vloxseg4ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t 
*base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2( @@ -310,7 +310,7 @@ void test_vloxseg4ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1( @@ -327,7 +327,7 @@ void test_vloxseg4ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2( @@ -344,7 +344,7 @@ void test_vloxseg4ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1( @@ -361,7 +361,7 @@ void test_vloxseg4ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m1(v0, v1, v2, v3, base, bindex, vl); + 
return __riscv_vloxseg4ei64_v_i64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2( @@ -378,7 +378,7 @@ void test_vloxseg4ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8( @@ -395,7 +395,7 @@ void test_vloxseg4ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4( @@ -412,7 +412,7 @@ void test_vloxseg4ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2( @@ -429,7 +429,7 @@ void test_vloxseg4ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei64_v_u8m1( @@ -446,7 +446,7 @@ void test_vloxseg4ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4( @@ -463,7 +463,7 @@ void test_vloxseg4ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2( @@ -480,7 +480,7 @@ void test_vloxseg4ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1( @@ -497,7 +497,7 @@ void test_vloxseg4ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2( @@ -514,7 +514,7 @@ void 
test_vloxseg4ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2( @@ -531,7 +531,7 @@ void test_vloxseg4ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1( @@ -548,7 +548,7 @@ void test_vloxseg4ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2( @@ -565,7 +565,7 @@ void test_vloxseg4ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1( @@ -582,7 +582,7 @@ void test_vloxseg4ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2( @@ -599,7 +599,7 @@ void test_vloxseg4ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_m( @@ -616,7 +616,7 @@ void test_vloxseg4ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_m( @@ -633,7 +633,7 @@ void test_vloxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_m( @@ -650,7 +650,7 @@ void test_vloxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 
// CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_m( @@ -667,7 +667,7 @@ void test_vloxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_m( @@ -684,7 +684,7 @@ void test_vloxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_m( @@ -701,7 +701,7 @@ void test_vloxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_m( @@ -718,7 +718,7 @@ void 
test_vloxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_m( @@ -735,7 +735,7 @@ void test_vloxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_m( @@ -752,7 +752,7 @@ void test_vloxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_m( @@ -769,7 +769,7 @@ void test_vloxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei64_v_i8mf4_m( @@ -786,7 +786,7 @@ void test_vloxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_m( @@ -803,7 +803,7 @@ void test_vloxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_m( @@ -820,7 +820,7 @@ void test_vloxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_m( @@ -837,7 +837,7 @@ void test_vloxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_m( @@ -854,7 +854,7 @@ void test_vloxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_m( @@ -871,7 +871,7 @@ void test_vloxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_m( @@ -888,7 +888,7 @@ void test_vloxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_m( @@ -905,7 +905,7 @@ void test_vloxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return 
__riscv_vloxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_m( @@ -922,7 +922,7 @@ void test_vloxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_m( @@ -939,7 +939,7 @@ void test_vloxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_m( @@ -956,7 +956,7 @@ void test_vloxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_m( @@ -973,7 +973,7 @@ void test_vloxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m2_m(v0, v1, v2, v3, mask, 
base, bindex, vl); + return __riscv_vloxseg4ei64_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_m( @@ -990,7 +990,7 @@ void test_vloxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_m( @@ -1007,7 +1007,7 @@ void test_vloxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_m( @@ -1024,7 +1024,7 @@ void test_vloxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_m( @@ -1041,7 +1041,7 @@ void test_vloxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return 
vloxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_m( @@ -1058,7 +1058,7 @@ void test_vloxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_m( @@ -1075,7 +1075,7 @@ void test_vloxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_m( @@ -1092,7 +1092,7 @@ void test_vloxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_m( @@ -1109,7 +1109,7 @@ void test_vloxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t 
mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_m( @@ -1126,7 +1126,7 @@ void test_vloxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_m( @@ -1143,7 +1143,7 @@ void test_vloxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_m( @@ -1160,7 +1160,7 @@ void test_vloxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_m( @@ -1177,7 +1177,7 @@ void test_vloxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u64m1_m(vuint64m1_t 
*v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_m( @@ -1194,6 +1194,6 @@ void test_vloxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c index bd04606cbdf8..8631124c591e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg4ei8.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2( @@ -38,7 +38,7 @@ void test_vloxseg4ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); 
+ return __riscv_vloxseg4ei8_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1( @@ -55,7 +55,7 @@ void test_vloxseg4ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2( @@ -72,7 +72,7 @@ void test_vloxseg4ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2( @@ -89,7 +89,7 @@ void test_vloxseg4ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1( @@ -106,7 +106,7 @@ void test_vloxseg4ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei8_v_f32m2( @@ -123,7 +123,7 @@ void test_vloxseg4ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1( @@ -140,7 +140,7 @@ void test_vloxseg4ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2( @@ -157,7 +157,7 @@ void test_vloxseg4ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8( @@ -174,7 +174,7 @@ void test_vloxseg4ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4( @@ -191,7 +191,7 @@ void test_vloxseg4ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, 
vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2( @@ -208,7 +208,7 @@ void test_vloxseg4ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1( @@ -225,7 +225,7 @@ void test_vloxseg4ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2( @@ -242,7 +242,7 @@ void test_vloxseg4ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4( @@ -259,7 +259,7 @@ void test_vloxseg4ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, 
const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2( @@ -276,7 +276,7 @@ void test_vloxseg4ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1( @@ -293,7 +293,7 @@ void test_vloxseg4ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2( @@ -310,7 +310,7 @@ void test_vloxseg4ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2( @@ -327,7 +327,7 @@ void test_vloxseg4ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i32mf2(v0, v1, v2, v3, base, bindex, 
vl); + return __riscv_vloxseg4ei8_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1( @@ -344,7 +344,7 @@ void test_vloxseg4ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2( @@ -361,7 +361,7 @@ void test_vloxseg4ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1( @@ -378,7 +378,7 @@ void test_vloxseg4ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2( @@ -395,7 +395,7 @@ void test_vloxseg4ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8( @@ 
-412,7 +412,7 @@ void test_vloxseg4ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4( @@ -429,7 +429,7 @@ void test_vloxseg4ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2( @@ -446,7 +446,7 @@ void test_vloxseg4ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1( @@ -463,7 +463,7 @@ void test_vloxseg4ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2( @@ -480,7 +480,7 @@ void test_vloxseg4ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret 
void // void test_vloxseg4ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4( @@ -497,7 +497,7 @@ void test_vloxseg4ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2( @@ -514,7 +514,7 @@ void test_vloxseg4ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1( @@ -531,7 +531,7 @@ void test_vloxseg4ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2( @@ -548,7 +548,7 @@ void test_vloxseg4ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, 
const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2( @@ -565,7 +565,7 @@ void test_vloxseg4ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1( @@ -582,7 +582,7 @@ void test_vloxseg4ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2( @@ -599,7 +599,7 @@ void test_vloxseg4ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1( @@ -616,7 +616,7 @@ void test_vloxseg4ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m1(v0, v1, v2, v3, base, 
bindex, vl); + return __riscv_vloxseg4ei8_v_u64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2( @@ -633,7 +633,7 @@ void test_vloxseg4ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_m( @@ -650,7 +650,7 @@ void test_vloxseg4ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_m( @@ -667,7 +667,7 @@ void test_vloxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_m( @@ -684,7 +684,7 @@ void test_vloxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m1_m(v0, v1, v2, 
v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_m( @@ -701,7 +701,7 @@ void test_vloxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_m( @@ -718,7 +718,7 @@ void test_vloxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_m( @@ -735,7 +735,7 @@ void test_vloxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_m( @@ -752,7 +752,7 @@ void test_vloxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) 
{ - return vloxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_m( @@ -769,7 +769,7 @@ void test_vloxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_m( @@ -786,7 +786,7 @@ void test_vloxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_m( @@ -803,7 +803,7 @@ void test_vloxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_m( @@ -820,7 +820,7 @@ void test_vloxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, 
vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_m( @@ -837,7 +837,7 @@ void test_vloxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_m( @@ -854,7 +854,7 @@ void test_vloxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_m( @@ -871,7 +871,7 @@ void test_vloxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_m( @@ -888,7 +888,7 @@ void test_vloxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, 
vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_m( @@ -905,7 +905,7 @@ void test_vloxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_m( @@ -922,7 +922,7 @@ void test_vloxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_m( @@ -939,7 +939,7 @@ void test_vloxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_m( @@ -956,7 +956,7 @@ void test_vloxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t 
mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_m( @@ -973,7 +973,7 @@ void test_vloxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_m( @@ -990,7 +990,7 @@ void test_vloxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_m( @@ -1007,7 +1007,7 @@ void test_vloxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_m( @@ -1024,7 +1024,7 @@ void test_vloxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t 
*v3, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_m( @@ -1041,7 +1041,7 @@ void test_vloxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_m( @@ -1058,7 +1058,7 @@ void test_vloxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_m( @@ -1075,7 +1075,7 @@ void test_vloxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_m( @@ -1092,7 +1092,7 @@ void test_vloxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t 
*v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_m( @@ -1109,7 +1109,7 @@ void test_vloxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_m( @@ -1126,7 +1126,7 @@ void test_vloxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_m( @@ -1143,7 +1143,7 @@ void test_vloxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_m( @@ -1160,7 +1160,7 @@ void test_vloxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_m( @@ -1177,7 +1177,7 @@ void test_vloxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_m( @@ -1194,7 +1194,7 @@ void test_vloxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_m( @@ -1211,7 +1211,7 @@ void test_vloxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_m( @@ -1228,7 +1228,7 @@ void test_vloxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_m( @@ -1245,7 +1245,7 @@ void test_vloxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_m( @@ -1262,6 +1262,6 @@ void test_vloxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c index 9f6cee14aaae..04c3df1626b3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei16.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, 
vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2( @@ -42,7 +42,7 @@ void test_vloxseg5ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1( @@ -61,7 +61,7 @@ void test_vloxseg5ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2( @@ -80,7 +80,7 @@ void test_vloxseg5ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1( @@ -99,7 +99,7 @@ void test_vloxseg5ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, 
vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1( @@ -118,7 +118,7 @@ void test_vloxseg5ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8( @@ -137,7 +137,7 @@ void test_vloxseg5ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4( @@ -156,7 +156,7 @@ void test_vloxseg5ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2( @@ -175,7 +175,7 @@ void test_vloxseg5ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1( @@ -194,7 +194,7 @@ void test_vloxseg5ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4( @@ -213,7 +213,7 @@ void test_vloxseg5ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2( @@ -232,7 +232,7 @@ void test_vloxseg5ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1( @@ -251,7 +251,7 @@ void test_vloxseg5ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t 
*v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2( @@ -270,7 +270,7 @@ void test_vloxseg5ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1( @@ -289,7 +289,7 @@ void test_vloxseg5ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1( @@ -308,7 +308,7 @@ void test_vloxseg5ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8( @@ -327,7 +327,7 @@ void test_vloxseg5ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, 
vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4( @@ -346,7 +346,7 @@ void test_vloxseg5ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2( @@ -365,7 +365,7 @@ void test_vloxseg5ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1( @@ -384,7 +384,7 @@ void test_vloxseg5ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4( @@ -403,7 +403,7 @@ void test_vloxseg5ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, 
vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2( @@ -422,7 +422,7 @@ void test_vloxseg5ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1( @@ -441,7 +441,7 @@ void test_vloxseg5ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2( @@ -460,7 +460,7 @@ void test_vloxseg5ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1( @@ -479,7 +479,7 @@ void test_vloxseg5ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u32m1(vuint32m1_t *v0, 
vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1( @@ -498,7 +498,7 @@ void test_vloxseg5ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_m( @@ -517,7 +517,7 @@ void test_vloxseg5ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_m( @@ -536,7 +536,7 @@ void test_vloxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_m( @@ -555,7 +555,7 @@ void test_vloxseg5ei16_v_f16mf2_m(vfloat16mf2_t 
*v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_m( @@ -574,7 +574,7 @@ void test_vloxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_m( @@ -593,7 +593,7 @@ void test_vloxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_m( @@ -612,7 +612,7 @@ void test_vloxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return 
__riscv_vloxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_m( @@ -631,7 +631,7 @@ void test_vloxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_m( @@ -650,7 +650,7 @@ void test_vloxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_m( @@ -669,7 +669,7 @@ void test_vloxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_m( @@ -688,7 +688,7 @@ void test_vloxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, 
vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_m( @@ -707,7 +707,7 @@ void test_vloxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_m( @@ -726,7 +726,7 @@ void test_vloxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_m( @@ -745,7 +745,7 @@ void test_vloxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_m( @@ -764,7 +764,7 @@ void test_vloxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vloxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_m( @@ -783,7 +783,7 @@ void test_vloxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_m( @@ -802,7 +802,7 @@ void test_vloxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_m( @@ -821,7 +821,7 @@ void test_vloxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg5ei16_v_u8mf4_m( @@ -840,7 +840,7 @@ void test_vloxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_m( @@ -859,7 +859,7 @@ void test_vloxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_m( @@ -878,7 +878,7 @@ void test_vloxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_m( @@ -897,7 +897,7 @@ void test_vloxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf4_m(v0, v1, 
v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_m( @@ -916,7 +916,7 @@ void test_vloxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_m( @@ -935,7 +935,7 @@ void test_vloxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_m( @@ -954,7 +954,7 @@ void test_vloxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_m( @@ -973,7 +973,7 @@ void test_vloxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u32m1_m(vuint32m1_t *v0, 
vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_m( @@ -992,6 +992,6 @@ void test_vloxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c index 560f2fa9c218..44fe7361db71 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei32.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2( @@ -42,7 +42,7 @@ void test_vloxseg5ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 
*base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1( @@ -61,7 +61,7 @@ void test_vloxseg5ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2( @@ -80,7 +80,7 @@ void test_vloxseg5ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1( @@ -99,7 +99,7 @@ void test_vloxseg5ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1( @@ -118,7 +118,7 @@ void test_vloxseg5ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, 
vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8( @@ -137,7 +137,7 @@ void test_vloxseg5ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4( @@ -156,7 +156,7 @@ void test_vloxseg5ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2( @@ -175,7 +175,7 @@ void test_vloxseg5ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1( @@ -194,7 +194,7 @@ void test_vloxseg5ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const 
int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4( @@ -213,7 +213,7 @@ void test_vloxseg5ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2( @@ -232,7 +232,7 @@ void test_vloxseg5ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1( @@ -251,7 +251,7 @@ void test_vloxseg5ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2( @@ -270,7 +270,7 @@ void test_vloxseg5ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, 
const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1( @@ -289,7 +289,7 @@ void test_vloxseg5ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1( @@ -308,7 +308,7 @@ void test_vloxseg5ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8( @@ -327,7 +327,7 @@ void test_vloxseg5ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4( @@ -346,7 +346,7 @@ void test_vloxseg5ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, 
const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2( @@ -365,7 +365,7 @@ void test_vloxseg5ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1( @@ -384,7 +384,7 @@ void test_vloxseg5ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4( @@ -403,7 +403,7 @@ void test_vloxseg5ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2( @@ -422,7 +422,7 @@ void test_vloxseg5ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, 
vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1( @@ -441,7 +441,7 @@ void test_vloxseg5ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2( @@ -460,7 +460,7 @@ void test_vloxseg5ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1( @@ -479,7 +479,7 @@ void test_vloxseg5ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1( @@ -498,7 +498,7 @@ void test_vloxseg5ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, 
vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_m( @@ -517,7 +517,7 @@ void test_vloxseg5ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_m( @@ -536,7 +536,7 @@ void test_vloxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_m( @@ -555,7 +555,7 @@ void test_vloxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_m( @@ -574,7 +574,7 @@ void 
test_vloxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_m( @@ -593,7 +593,7 @@ void test_vloxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_m( @@ -612,7 +612,7 @@ void test_vloxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_m( @@ -631,7 +631,7 @@ void test_vloxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + 
return __riscv_vloxseg5ei32_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_m( @@ -650,7 +650,7 @@ void test_vloxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_m( @@ -669,7 +669,7 @@ void test_vloxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_m( @@ -688,7 +688,7 @@ void test_vloxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_m( @@ -707,7 +707,7 @@ void test_vloxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t 
*base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_m( @@ -726,7 +726,7 @@ void test_vloxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_m( @@ -745,7 +745,7 @@ void test_vloxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_m( @@ -764,7 +764,7 @@ void test_vloxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_m( @@ -783,7 +783,7 @@ void test_vloxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // 
void test_vloxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_m( @@ -802,7 +802,7 @@ void test_vloxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_m( @@ -821,7 +821,7 @@ void test_vloxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_m( @@ -840,7 +840,7 @@ void test_vloxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg5ei32_v_u8mf2_m( @@ -859,7 +859,7 @@ void test_vloxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_m( @@ -878,7 +878,7 @@ void test_vloxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_m( @@ -897,7 +897,7 @@ void test_vloxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_m( @@ -916,7 +916,7 @@ void test_vloxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return 
vloxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_m( @@ -935,7 +935,7 @@ void test_vloxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_m( @@ -954,7 +954,7 @@ void test_vloxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_m( @@ -973,7 +973,7 @@ void test_vloxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_m( @@ -992,6 +992,6 @@ void test_vloxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void 
test_vloxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c index cfbf306e604a..456306e69caa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei64.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2( @@ -42,7 +42,7 @@ void test_vloxseg5ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1( @@ -61,7 +61,7 @@ void test_vloxseg5ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const 
_Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2( @@ -80,7 +80,7 @@ void test_vloxseg5ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1( @@ -99,7 +99,7 @@ void test_vloxseg5ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1( @@ -118,7 +118,7 @@ void test_vloxseg5ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8( @@ -137,7 +137,7 @@ void test_vloxseg5ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t 
*v4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4( @@ -156,7 +156,7 @@ void test_vloxseg5ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2( @@ -175,7 +175,7 @@ void test_vloxseg5ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1( @@ -194,7 +194,7 @@ void test_vloxseg5ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4( @@ -213,7 +213,7 @@ void test_vloxseg5ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, 
vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2( @@ -232,7 +232,7 @@ void test_vloxseg5ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1( @@ -251,7 +251,7 @@ void test_vloxseg5ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2( @@ -270,7 +270,7 @@ void test_vloxseg5ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1( @@ -289,7 +289,7 @@ void test_vloxseg5ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, 
vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1( @@ -308,7 +308,7 @@ void test_vloxseg5ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8( @@ -327,7 +327,7 @@ void test_vloxseg5ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4( @@ -346,7 +346,7 @@ void test_vloxseg5ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2( @@ -365,7 +365,7 @@ void test_vloxseg5ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, 
vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1( @@ -384,7 +384,7 @@ void test_vloxseg5ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4( @@ -403,7 +403,7 @@ void test_vloxseg5ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2( @@ -422,7 +422,7 @@ void test_vloxseg5ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1( @@ -441,7 +441,7 @@ void test_vloxseg5ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const 
uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2( @@ -460,7 +460,7 @@ void test_vloxseg5ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1( @@ -479,7 +479,7 @@ void test_vloxseg5ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1( @@ -498,7 +498,7 @@ void test_vloxseg5ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_m( @@ -517,7 +517,7 @@ void test_vloxseg5ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t 
*v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_m( @@ -536,7 +536,7 @@ void test_vloxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_m( @@ -555,7 +555,7 @@ void test_vloxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_m( @@ -574,7 +574,7 @@ void test_vloxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_m( @@ -593,7 +593,7 @@ void 
test_vloxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_m( @@ -612,7 +612,7 @@ void test_vloxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_m( @@ -631,7 +631,7 @@ void test_vloxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_m( @@ -650,7 +650,7 @@ void test_vloxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return 
__riscv_vloxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_m( @@ -669,7 +669,7 @@ void test_vloxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_m( @@ -688,7 +688,7 @@ void test_vloxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_m( @@ -707,7 +707,7 @@ void test_vloxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_m( @@ -726,7 +726,7 @@ void test_vloxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const 
int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_m( @@ -745,7 +745,7 @@ void test_vloxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_m( @@ -764,7 +764,7 @@ void test_vloxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_m( @@ -783,7 +783,7 @@ void test_vloxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_m( @@ -802,7 +802,7 @@ void test_vloxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // 
void test_vloxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_m( @@ -821,7 +821,7 @@ void test_vloxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_m( @@ -840,7 +840,7 @@ void test_vloxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_m( @@ -859,7 +859,7 @@ void test_vloxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg5ei64_v_u8m1_m( @@ -878,7 +878,7 @@ void test_vloxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_m( @@ -897,7 +897,7 @@ void test_vloxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_m( @@ -916,7 +916,7 @@ void test_vloxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_m( @@ -935,7 +935,7 @@ void test_vloxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return 
vloxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_m( @@ -954,7 +954,7 @@ void test_vloxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_m( @@ -973,7 +973,7 @@ void test_vloxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_m( @@ -992,6 +992,6 @@ void test_vloxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c index 
9289c5307cd1..dac9fafbddfd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg5ei8.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2( @@ -42,7 +42,7 @@ void test_vloxseg5ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1( @@ -61,7 +61,7 @@ void test_vloxseg5ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2( @@ -80,7 +80,7 @@ void test_vloxseg5ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { - return 
vloxseg5ei8_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1( @@ -99,7 +99,7 @@ void test_vloxseg5ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1( @@ -118,7 +118,7 @@ void test_vloxseg5ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8( @@ -137,7 +137,7 @@ void test_vloxseg5ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4( @@ -156,7 +156,7 @@ void test_vloxseg5ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return 
vloxseg5ei8_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2( @@ -175,7 +175,7 @@ void test_vloxseg5ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1( @@ -194,7 +194,7 @@ void test_vloxseg5ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4( @@ -213,7 +213,7 @@ void test_vloxseg5ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2( @@ -232,7 +232,7 @@ void test_vloxseg5ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf2(v0, v1, 
v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1( @@ -251,7 +251,7 @@ void test_vloxseg5ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2( @@ -270,7 +270,7 @@ void test_vloxseg5ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1( @@ -289,7 +289,7 @@ void test_vloxseg5ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1( @@ -308,7 +308,7 @@ void test_vloxseg5ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i64m1(v0, v1, v2, v3, v4, base, bindex, 
vl); + return __riscv_vloxseg5ei8_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8( @@ -327,7 +327,7 @@ void test_vloxseg5ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4( @@ -346,7 +346,7 @@ void test_vloxseg5ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2( @@ -365,7 +365,7 @@ void test_vloxseg5ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1( @@ -384,7 +384,7 @@ void test_vloxseg5ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return 
__riscv_vloxseg5ei8_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4( @@ -403,7 +403,7 @@ void test_vloxseg5ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2( @@ -422,7 +422,7 @@ void test_vloxseg5ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1( @@ -441,7 +441,7 @@ void test_vloxseg5ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2( @@ -460,7 +460,7 @@ void test_vloxseg5ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, 
vl); + return __riscv_vloxseg5ei8_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1( @@ -479,7 +479,7 @@ void test_vloxseg5ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1( @@ -498,7 +498,7 @@ void test_vloxseg5ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_m( @@ -517,7 +517,7 @@ void test_vloxseg5ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_m( @@ -536,7 +536,7 @@ void test_vloxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, 
size_t vl) { - return vloxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_m( @@ -555,7 +555,7 @@ void test_vloxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_m( @@ -574,7 +574,7 @@ void test_vloxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_m( @@ -593,7 +593,7 @@ void test_vloxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_m( @@ -612,7 +612,7 @@ void test_vloxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_m( @@ -631,7 +631,7 @@ void test_vloxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_m( @@ -650,7 +650,7 @@ void test_vloxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_m( @@ -669,7 +669,7 @@ void test_vloxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_m( @@ -688,7 +688,7 @@ void 
test_vloxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_m( @@ -707,7 +707,7 @@ void test_vloxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_m( @@ -726,7 +726,7 @@ void test_vloxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_m( @@ -745,7 +745,7 @@ void test_vloxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16m1_m(v0, v1, 
v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_m( @@ -764,7 +764,7 @@ void test_vloxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_m( @@ -783,7 +783,7 @@ void test_vloxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_m( @@ -802,7 +802,7 @@ void test_vloxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_m( @@ -821,7 +821,7 @@ void test_vloxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - 
return vloxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_m( @@ -840,7 +840,7 @@ void test_vloxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_m( @@ -859,7 +859,7 @@ void test_vloxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_m( @@ -878,7 +878,7 @@ void test_vloxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_m( @@ -897,7 +897,7 @@ void test_vloxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_m( @@ -916,7 +916,7 @@ void test_vloxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_m( @@ -935,7 +935,7 @@ void test_vloxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_m( @@ -954,7 +954,7 @@ void test_vloxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_m( @@ -973,7 +973,7 @@ void 
test_vloxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_m( @@ -992,6 +992,6 @@ void test_vloxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c index 8796c884c3e2..cf8c5d0124f1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei16.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2( @@ -46,7 +46,7 @@ void test_vloxseg6ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // 
CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1( @@ -67,7 +67,7 @@ void test_vloxseg6ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2( @@ -88,7 +88,7 @@ void test_vloxseg6ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1( @@ -109,7 +109,7 @@ void test_vloxseg6ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1( @@ -130,7 +130,7 @@ void test_vloxseg6ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8( @@ -151,7 +151,7 @@ void test_vloxseg6ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4( @@ -172,7 +172,7 @@ void test_vloxseg6ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2( @@ -193,7 +193,7 @@ void test_vloxseg6ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); 
+ return __riscv_vloxseg6ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1( @@ -214,7 +214,7 @@ void test_vloxseg6ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4( @@ -235,7 +235,7 @@ void test_vloxseg6ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2( @@ -256,7 +256,7 @@ void test_vloxseg6ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1( @@ -277,7 +277,7 @@ void test_vloxseg6ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint16m1_t 
bindex, size_t vl) { - return vloxseg6ei16_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2( @@ -298,7 +298,7 @@ void test_vloxseg6ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1( @@ -319,7 +319,7 @@ void test_vloxseg6ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1( @@ -340,7 +340,7 @@ void test_vloxseg6ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8( @@ -361,7 +361,7 @@ void test_vloxseg6ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, 
vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4( @@ -382,7 +382,7 @@ void test_vloxseg6ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2( @@ -403,7 +403,7 @@ void test_vloxseg6ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1( @@ -424,7 +424,7 @@ void test_vloxseg6ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4( @@ -445,7 +445,7 @@ void test_vloxseg6ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // 
CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2( @@ -466,7 +466,7 @@ void test_vloxseg6ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1( @@ -487,7 +487,7 @@ void test_vloxseg6ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2( @@ -508,7 +508,7 @@ void test_vloxseg6ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1( @@ -529,7 +529,7 @@ void test_vloxseg6ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1( @@ -550,7 +550,7 @@ void test_vloxseg6ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_m( @@ -571,7 +571,7 @@ void test_vloxseg6ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_m( @@ -592,7 +592,7 @@ void test_vloxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 
*base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_m( @@ -613,7 +613,7 @@ void test_vloxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_m( @@ -634,7 +634,7 @@ void test_vloxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_m( @@ -655,7 +655,7 @@ void test_vloxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_m( @@ -676,7 +676,7 @@ 
void test_vloxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_m( @@ -697,7 +697,7 @@ void test_vloxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_m( @@ -718,7 +718,7 @@ void test_vloxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_m( @@ -739,7 +739,7 @@ void test_vloxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return 
vloxseg6ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_m( @@ -760,7 +760,7 @@ void test_vloxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_m( @@ -781,7 +781,7 @@ void test_vloxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_m( @@ -802,7 +802,7 @@ void test_vloxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_m( @@ -823,7 +823,7 @@ void test_vloxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // 
CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_m( @@ -844,7 +844,7 @@ void test_vloxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_m( @@ -865,7 +865,7 @@ void test_vloxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_m( @@ -886,7 +886,7 @@ void test_vloxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return 
__riscv_vloxseg6ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_m( @@ -907,7 +907,7 @@ void test_vloxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_m( @@ -928,7 +928,7 @@ void test_vloxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vloxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_m( @@ -970,7 +970,7 @@ void test_vloxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8m1_m(vuint8m1_t *v0, 
vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_m( @@ -991,7 +991,7 @@ void test_vloxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_m( @@ -1012,7 +1012,7 @@ void test_vloxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_m( @@ -1033,7 +1033,7 @@ void test_vloxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16m1_m(v0, v1, v2, v3, v4, 
v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void test_vloxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_m( @@ -1075,7 +1075,7 @@ void test_vloxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_m( @@ -1096,6 +1096,6 @@ void test_vloxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c index df1d7e03e22d..0ad1946a65ea 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei32.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2( @@ -46,7 +46,7 @@ void test_vloxseg6ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1( @@ -67,7 +67,7 @@ void test_vloxseg6ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2( @@ -88,7 +88,7 @@ void test_vloxseg6ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t 
*v5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1( @@ -109,7 +109,7 @@ void test_vloxseg6ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1( @@ -130,7 +130,7 @@ void test_vloxseg6ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8( @@ -151,7 +151,7 @@ void test_vloxseg6ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4( @@ -172,7 +172,7 @@ void test_vloxseg6ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vloxseg6ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2( @@ -193,7 +193,7 @@ void test_vloxseg6ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1( @@ -214,7 +214,7 @@ void test_vloxseg6ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4( @@ -235,7 +235,7 @@ void test_vloxseg6ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2( @@ -256,7 +256,7 @@ void 
test_vloxseg6ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1( @@ -277,7 +277,7 @@ void test_vloxseg6ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2( @@ -298,7 +298,7 @@ void test_vloxseg6ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1( @@ -319,7 +319,7 @@ void test_vloxseg6ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i32m1(v0, v1, v2, v3, v4, 
v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1( @@ -340,7 +340,7 @@ void test_vloxseg6ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8( @@ -361,7 +361,7 @@ void test_vloxseg6ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4( @@ -382,7 +382,7 @@ void test_vloxseg6ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2( @@ -403,7 +403,7 @@ void test_vloxseg6ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf2(v0, 
v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1( @@ -424,7 +424,7 @@ void test_vloxseg6ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4( @@ -445,7 +445,7 @@ void test_vloxseg6ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2( @@ -466,7 +466,7 @@ void test_vloxseg6ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1( @@ -487,7 +487,7 @@ void test_vloxseg6ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, 
vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2( @@ -508,7 +508,7 @@ void test_vloxseg6ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1( @@ -529,7 +529,7 @@ void test_vloxseg6ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1( @@ -550,7 +550,7 @@ void test_vloxseg6ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_m( @@ -571,7 +571,7 @@ void test_vloxseg6ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_m( @@ -592,7 +592,7 @@ void test_vloxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_m( @@ -613,7 +613,7 @@ void test_vloxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_m( @@ -634,7 +634,7 @@ void test_vloxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return 
vloxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_m( @@ -655,7 +655,7 @@ void test_vloxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_m( @@ -676,7 +676,7 @@ void test_vloxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_m( @@ -697,7 +697,7 @@ void test_vloxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_m( @@ -718,7 +718,7 @@ void test_vloxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t 
*v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_m( @@ -739,7 +739,7 @@ void test_vloxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_m( @@ -760,7 +760,7 @@ void test_vloxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_m( @@ -781,7 +781,7 @@ void test_vloxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return 
__riscv_vloxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_m( @@ -802,7 +802,7 @@ void test_vloxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_m( @@ -823,7 +823,7 @@ void test_vloxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_m( @@ -844,7 +844,7 @@ void test_vloxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_m( @@ -865,7 +865,7 @@ void test_vloxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i32m1_m(vint32m1_t *v0, 
vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_m( @@ -886,7 +886,7 @@ void test_vloxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_m( @@ -907,7 +907,7 @@ void test_vloxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_m( @@ -928,7 +928,7 @@ void test_vloxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vloxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_m( @@ -970,7 +970,7 @@ void test_vloxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_m( @@ -991,7 +991,7 @@ void test_vloxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_m( @@ -1012,7 +1012,7 @@ void test_vloxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, 
vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_m( @@ -1033,7 +1033,7 @@ void test_vloxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void test_vloxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_m( @@ -1075,7 +1075,7 @@ void test_vloxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg6ei32_v_u64m1_m( @@ -1096,6 +1096,6 @@ void test_vloxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c index d6f384014a3c..192d30c44fb9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei64.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2( @@ -46,7 +46,7 @@ void test_vloxseg6ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1( @@ -67,7 
+67,7 @@ void test_vloxseg6ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2( @@ -88,7 +88,7 @@ void test_vloxseg6ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1( @@ -109,7 +109,7 @@ void test_vloxseg6ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1( @@ -130,7 +130,7 @@ void test_vloxseg6ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return 
__riscv_vloxseg6ei64_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8( @@ -151,7 +151,7 @@ void test_vloxseg6ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4( @@ -172,7 +172,7 @@ void test_vloxseg6ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2( @@ -193,7 +193,7 @@ void test_vloxseg6ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1( @@ -214,7 +214,7 @@ void test_vloxseg6ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return 
vloxseg6ei64_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4( @@ -235,7 +235,7 @@ void test_vloxseg6ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2( @@ -256,7 +256,7 @@ void test_vloxseg6ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1( @@ -277,7 +277,7 @@ void test_vloxseg6ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2( @@ -298,7 +298,7 @@ void test_vloxseg6ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t 
*v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1( @@ -319,7 +319,7 @@ void test_vloxseg6ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1( @@ -340,7 +340,7 @@ void test_vloxseg6ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8( @@ -361,7 +361,7 @@ void test_vloxseg6ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4( @@ -382,7 +382,7 @@ void test_vloxseg6ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // 
void test_vloxseg6ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2( @@ -403,7 +403,7 @@ void test_vloxseg6ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1( @@ -424,7 +424,7 @@ void test_vloxseg6ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4( @@ -445,7 +445,7 @@ void test_vloxseg6ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2( @@ -466,7 +466,7 @@ void 
test_vloxseg6ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1( @@ -487,7 +487,7 @@ void test_vloxseg6ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2( @@ -508,7 +508,7 @@ void test_vloxseg6ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1( @@ -529,7 +529,7 @@ void test_vloxseg6ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return 
__riscv_vloxseg6ei64_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1( @@ -550,7 +550,7 @@ void test_vloxseg6ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_m( @@ -571,7 +571,7 @@ void test_vloxseg6ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_m( @@ -592,7 +592,7 @@ void test_vloxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_m( @@ -613,7 +613,7 @@ void test_vloxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_m( @@ -634,7 +634,7 @@ void test_vloxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_m( @@ -655,7 +655,7 @@ void test_vloxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_m( @@ -676,7 +676,7 @@ void test_vloxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f64m1_m(v0, v1, v2, 
v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_m( @@ -697,7 +697,7 @@ void test_vloxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_m( @@ -718,7 +718,7 @@ void test_vloxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_m( @@ -739,7 +739,7 @@ void test_vloxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_m( @@ -760,7 +760,7 @@ void test_vloxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, 
vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_m( @@ -781,7 +781,7 @@ void test_vloxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_m( @@ -802,7 +802,7 @@ void test_vloxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_m( @@ -823,7 +823,7 @@ void test_vloxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_m( @@ -844,7 
+844,7 @@ void test_vloxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_m( @@ -865,7 +865,7 @@ void test_vloxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_m( @@ -886,7 +886,7 @@ void test_vloxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_m( @@ -907,7 +907,7 @@ void test_vloxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) 
{ - return vloxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_m( @@ -928,7 +928,7 @@ void test_vloxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vloxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_m( @@ -970,7 +970,7 @@ void test_vloxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_m( @@ -991,7 +991,7 @@ void test_vloxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, 
// CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_m( @@ -1012,7 +1012,7 @@ void test_vloxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_m( @@ -1033,7 +1033,7 @@ void test_vloxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void test_vloxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u32mf2_m(v0, v1, 
v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_m( @@ -1075,7 +1075,7 @@ void test_vloxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_m( @@ -1096,6 +1096,6 @@ void test_vloxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c index 54fda1dfc846..ae911c83adc0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg6ei8.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf4(v0, v1, v2, v3, 
v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2( @@ -46,7 +46,7 @@ void test_vloxseg6ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1( @@ -67,7 +67,7 @@ void test_vloxseg6ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2( @@ -88,7 +88,7 @@ void test_vloxseg6ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1( @@ -109,7 +109,7 @@ void test_vloxseg6ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, 
vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1( @@ -130,7 +130,7 @@ void test_vloxseg6ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8( @@ -151,7 +151,7 @@ void test_vloxseg6ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4( @@ -172,7 +172,7 @@ void test_vloxseg6ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2( @@ -193,7 +193,7 @@ void test_vloxseg6ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void 
test_vloxseg6ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1( @@ -214,7 +214,7 @@ void test_vloxseg6ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4( @@ -235,7 +235,7 @@ void test_vloxseg6ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2( @@ -256,7 +256,7 @@ void test_vloxseg6ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1( @@ -277,7 +277,7 @@ void test_vloxseg6ei8_v_i16mf2(vint16mf2_t *v0, 
vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2( @@ -298,7 +298,7 @@ void test_vloxseg6ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1( @@ -319,7 +319,7 @@ void test_vloxseg6ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1( @@ -340,7 +340,7 @@ void test_vloxseg6ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg6ei8_v_u8mf8( @@ -361,7 +361,7 @@ void test_vloxseg6ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4( @@ -382,7 +382,7 @@ void test_vloxseg6ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2( @@ -403,7 +403,7 @@ void test_vloxseg6ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1( @@ -424,7 +424,7 @@ void test_vloxseg6ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return 
__riscv_vloxseg6ei8_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4( @@ -445,7 +445,7 @@ void test_vloxseg6ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2( @@ -466,7 +466,7 @@ void test_vloxseg6ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1( @@ -487,7 +487,7 @@ void test_vloxseg6ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2( @@ -508,7 +508,7 @@ void test_vloxseg6ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, 
vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1( @@ -529,7 +529,7 @@ void test_vloxseg6ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1( @@ -550,7 +550,7 @@ void test_vloxseg6ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_m( @@ -571,7 +571,7 @@ void test_vloxseg6ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_m( @@ -592,7 +592,7 @@ void test_vloxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void 
test_vloxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_m( @@ -613,7 +613,7 @@ void test_vloxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_m( @@ -634,7 +634,7 @@ void test_vloxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_m( @@ -655,7 +655,7 @@ void test_vloxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return 
__riscv_vloxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_m( @@ -676,7 +676,7 @@ void test_vloxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_m( @@ -697,7 +697,7 @@ void test_vloxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_m( @@ -718,7 +718,7 @@ void test_vloxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_m( @@ -739,7 +739,7 @@ void test_vloxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t 
*v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_m( @@ -760,7 +760,7 @@ void test_vloxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_m( @@ -781,7 +781,7 @@ void test_vloxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_m( @@ -802,7 +802,7 @@ void test_vloxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg6ei8_v_i16m1_m( @@ -823,7 +823,7 @@ void test_vloxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_m( @@ -844,7 +844,7 @@ void test_vloxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_m( @@ -865,7 +865,7 @@ void test_vloxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_m( @@ -886,7 +886,7 @@ void test_vloxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint8mf8_t 
bindex, size_t vl) { - return vloxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_m( @@ -907,7 +907,7 @@ void test_vloxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_m( @@ -928,7 +928,7 @@ void test_vloxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vloxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_m( @@ -970,7 +970,7 @@ void test_vloxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, 
vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_m( @@ -991,7 +991,7 @@ void test_vloxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_m( @@ -1012,7 +1012,7 @@ void test_vloxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_m( @@ -1033,7 +1033,7 @@ void test_vloxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, 
base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void test_vloxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_m( @@ -1075,7 +1075,7 @@ void test_vloxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_m( @@ -1096,6 +1096,6 @@ void test_vloxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c index 5c57f94a7313..8e30aaf72c8e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei16.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2( @@ -50,7 +50,7 @@ void test_vloxseg7ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1( @@ -73,7 +73,7 @@ void test_vloxseg7ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2( @@ -96,7 +96,7 @@ void 
test_vloxseg7ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1( @@ -119,7 +119,7 @@ void test_vloxseg7ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1( @@ -142,7 +142,7 @@ void test_vloxseg7ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8( @@ -165,7 +165,7 @@ void test_vloxseg7ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg7ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4( @@ -188,7 +188,7 @@ void test_vloxseg7ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2( @@ -211,7 +211,7 @@ void test_vloxseg7ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1( @@ -234,7 +234,7 @@ void test_vloxseg7ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4( @@ -257,7 +257,7 @@ void test_vloxseg7ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16mf4(vint16mf4_t 
*v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2( @@ -280,7 +280,7 @@ void test_vloxseg7ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1( @@ -303,7 +303,7 @@ void test_vloxseg7ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2( @@ -326,7 +326,7 @@ void test_vloxseg7ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1( @@ -349,7 +349,7 @@ void test_vloxseg7ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1( @@ -372,7 +372,7 @@ void test_vloxseg7ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8( @@ -395,7 +395,7 @@ void test_vloxseg7ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4( @@ -418,7 +418,7 @@ void test_vloxseg7ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, 
vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2( @@ -441,7 +441,7 @@ void test_vloxseg7ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1( @@ -464,7 +464,7 @@ void test_vloxseg7ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4( @@ -487,7 +487,7 @@ void test_vloxseg7ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2( @@ -510,7 +510,7 @@ void test_vloxseg7ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1( @@ -533,7 +533,7 @@ void test_vloxseg7ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2( @@ -556,7 +556,7 @@ void test_vloxseg7ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1( @@ -579,7 +579,7 @@ void test_vloxseg7ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, 
vl); + return __riscv_vloxseg7ei16_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1( @@ -602,7 +602,7 @@ void test_vloxseg7ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_m( @@ -625,7 +625,7 @@ void test_vloxseg7ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_m( @@ -648,7 +648,7 @@ void test_vloxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_m( @@ -671,7 +671,7 @@ void test_vloxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, 
vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_m( @@ -694,7 +694,7 @@ void test_vloxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_m( @@ -717,7 +717,7 @@ void test_vloxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_m( @@ -740,7 +740,7 @@ void test_vloxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t 
mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_m( @@ -763,7 +763,7 @@ void test_vloxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_m( @@ -786,7 +786,7 @@ void test_vloxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_m( @@ -809,7 +809,7 @@ void test_vloxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg7ei16_v_i8m1_m( @@ -832,7 +832,7 @@ void test_vloxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_m( @@ -855,7 +855,7 @@ void test_vloxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_m( @@ -878,7 +878,7 @@ void test_vloxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_m( @@ -901,7 +901,7 @@ void test_vloxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, 
vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_m( @@ -924,7 +924,7 @@ void test_vloxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_m( @@ -947,7 +947,7 @@ void test_vloxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_m( @@ -970,7 +970,7 @@ void test_vloxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return 
__riscv_vloxseg7ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_m( @@ -993,7 +993,7 @@ void test_vloxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_m( @@ -1016,7 +1016,7 @@ void test_vloxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_m( @@ -1039,7 +1039,7 @@ void test_vloxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_m( @@ -1062,7 +1062,7 @@ void test_vloxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, 
vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_m( @@ -1085,7 +1085,7 @@ void test_vloxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_m( @@ -1108,7 +1108,7 @@ void test_vloxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_m( @@ -1131,7 +1131,7 @@ void test_vloxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const 
uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_m( @@ -1154,7 +1154,7 @@ void test_vloxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_m( @@ -1177,7 +1177,7 @@ void test_vloxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_m( @@ -1200,6 +1200,6 @@ void test_vloxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, 
bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c index 3ae1a35ef9c2..6b6330a61453 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei32.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2( @@ -50,7 +50,7 @@ void test_vloxseg7ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1( @@ -73,7 +73,7 @@ void test_vloxseg7ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16m1(v0, v1, v2, v3, v4, v5, 
v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2( @@ -96,7 +96,7 @@ void test_vloxseg7ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1( @@ -119,7 +119,7 @@ void test_vloxseg7ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1( @@ -142,7 +142,7 @@ void test_vloxseg7ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8( @@ -165,7 +165,7 @@ void test_vloxseg7ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, 
vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4( @@ -188,7 +188,7 @@ void test_vloxseg7ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2( @@ -211,7 +211,7 @@ void test_vloxseg7ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1( @@ -234,7 +234,7 @@ void test_vloxseg7ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4( @@ -257,7 +257,7 @@ void test_vloxseg7ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, 
vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2( @@ -280,7 +280,7 @@ void test_vloxseg7ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1( @@ -303,7 +303,7 @@ void test_vloxseg7ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2( @@ -326,7 +326,7 @@ void test_vloxseg7ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return 
__riscv_vloxseg7ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1( @@ -349,7 +349,7 @@ void test_vloxseg7ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1( @@ -372,7 +372,7 @@ void test_vloxseg7ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8( @@ -395,7 +395,7 @@ void test_vloxseg7ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4( @@ -418,7 +418,7 @@ void test_vloxseg7ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, 
vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2( @@ -441,7 +441,7 @@ void test_vloxseg7ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1( @@ -464,7 +464,7 @@ void test_vloxseg7ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4( @@ -487,7 +487,7 @@ void test_vloxseg7ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2( @@ -510,7 +510,7 @@ void 
test_vloxseg7ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1( @@ -533,7 +533,7 @@ void test_vloxseg7ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2( @@ -556,7 +556,7 @@ void test_vloxseg7ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1( @@ -579,7 +579,7 @@ void test_vloxseg7ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return 
vloxseg7ei32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1( @@ -602,7 +602,7 @@ void test_vloxseg7ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_m( @@ -625,7 +625,7 @@ void test_vloxseg7ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_m( @@ -648,7 +648,7 @@ void test_vloxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_m( @@ -671,7 +671,7 @@ void 
test_vloxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_m( @@ -694,7 +694,7 @@ void test_vloxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_m( @@ -717,7 +717,7 @@ void test_vloxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_m( @@ -740,7 +740,7 @@ void test_vloxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t 
*v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_m( @@ -763,7 +763,7 @@ void test_vloxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_m( @@ -786,7 +786,7 @@ void test_vloxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_m( @@ -809,7 +809,7 @@ void test_vloxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf2_m(v0, v1, 
v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_m( @@ -832,7 +832,7 @@ void test_vloxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_m( @@ -855,7 +855,7 @@ void test_vloxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_m( @@ -878,7 +878,7 @@ void test_vloxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_m( @@ -901,7 +901,7 @@ void test_vloxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_m( @@ -924,7 +924,7 @@ void test_vloxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_m( @@ -947,7 +947,7 @@ void test_vloxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_m( @@ -970,7 +970,7 @@ void test_vloxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i64m1_m(v0, v1, 
v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_m( @@ -993,7 +993,7 @@ void test_vloxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_m( @@ -1016,7 +1016,7 @@ void test_vloxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_m( @@ -1039,7 +1039,7 @@ void test_vloxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_m( @@ -1062,7 +1062,7 @@ void 
test_vloxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_m( @@ -1085,7 +1085,7 @@ void test_vloxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_m( @@ -1108,7 +1108,7 @@ void test_vloxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_m( @@ -1131,7 +1131,7 @@ void test_vloxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t 
*v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_m( @@ -1154,7 +1154,7 @@ void test_vloxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_m( @@ -1177,7 +1177,7 @@ void test_vloxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_m( @@ -1200,6 +1200,6 @@ void test_vloxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return 
__riscv_vloxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c index 169d44a69ae2..4ec43c667fe8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei64.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2( @@ -50,7 +50,7 @@ void test_vloxseg7ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1( @@ -73,7 +73,7 @@ void test_vloxseg7ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, 
vl); + return __riscv_vloxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2( @@ -96,7 +96,7 @@ void test_vloxseg7ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1( @@ -119,7 +119,7 @@ void test_vloxseg7ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1( @@ -142,7 +142,7 @@ void test_vloxseg7ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8( @@ -165,7 +165,7 @@ void test_vloxseg7ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf8(vint8mf8_t *v0, 
vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4( @@ -188,7 +188,7 @@ void test_vloxseg7ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2( @@ -211,7 +211,7 @@ void test_vloxseg7ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1( @@ -234,7 +234,7 @@ void test_vloxseg7ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4( @@ -257,7 +257,7 @@ 
void test_vloxseg7ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2( @@ -280,7 +280,7 @@ void test_vloxseg7ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1( @@ -303,7 +303,7 @@ void test_vloxseg7ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2( @@ -326,7 +326,7 @@ void test_vloxseg7ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return 
vloxseg7ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1( @@ -349,7 +349,7 @@ void test_vloxseg7ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1( @@ -372,7 +372,7 @@ void test_vloxseg7ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8( @@ -395,7 +395,7 @@ void test_vloxseg7ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4( @@ -418,7 +418,7 @@ void test_vloxseg7ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2( @@ -441,7 +441,7 @@ void test_vloxseg7ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1( @@ -464,7 +464,7 @@ void test_vloxseg7ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4( @@ -487,7 +487,7 @@ void test_vloxseg7ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2( @@ -510,7 +510,7 @@ void test_vloxseg7ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1( @@ -533,7 +533,7 @@ void test_vloxseg7ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2( @@ -556,7 +556,7 @@ void test_vloxseg7ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1( @@ -579,7 +579,7 @@ void test_vloxseg7ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t 
*v5, vuint32m1_t *v6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1( @@ -602,7 +602,7 @@ void test_vloxseg7ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_m( @@ -625,7 +625,7 @@ void test_vloxseg7ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_m( @@ -648,7 +648,7 @@ void test_vloxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_m( @@ -671,7 +671,7 @@ void test_vloxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_m( @@ -694,7 +694,7 @@ void test_vloxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_m( @@ -717,7 +717,7 @@ void test_vloxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_m( @@ -740,7 +740,7 @@ void test_vloxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_m( @@ -763,7 +763,7 @@ void test_vloxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_m( @@ -786,7 +786,7 @@ void test_vloxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_m( @@ -809,7 +809,7 @@ void test_vloxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf2_m(v0, v1, 
v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_m( @@ -832,7 +832,7 @@ void test_vloxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_m( @@ -855,7 +855,7 @@ void test_vloxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_m( @@ -878,7 +878,7 @@ void test_vloxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_m( @@ -901,7 +901,7 @@ void 
test_vloxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_m( @@ -924,7 +924,7 @@ void test_vloxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_m( @@ -947,7 +947,7 @@ void test_vloxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_m( @@ -970,7 +970,7 @@ void test_vloxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, 
vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_m( @@ -993,7 +993,7 @@ void test_vloxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_m( @@ -1016,7 +1016,7 @@ void test_vloxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_m( @@ -1039,7 +1039,7 @@ void test_vloxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_m( @@ -1062,7 +1062,7 @@ void test_vloxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_m( @@ -1085,7 +1085,7 @@ void test_vloxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_m( @@ -1108,7 +1108,7 @@ void test_vloxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_m( @@ -1131,7 +1131,7 @@ void test_vloxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_m( @@ -1154,7 +1154,7 @@ void test_vloxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_m( @@ -1177,7 +1177,7 @@ void test_vloxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_m( @@ -1200,6 +1200,6 @@ void test_vloxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - 
return vloxseg7ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c index 0981fb671987..28438098eca2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg7ei8.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2( @@ -50,7 +50,7 @@ void test_vloxseg7ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1( @@ -73,7 +73,7 @@ void test_vloxseg7ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, 
size_t vl) { - return vloxseg7ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2( @@ -96,7 +96,7 @@ void test_vloxseg7ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1( @@ -119,7 +119,7 @@ void test_vloxseg7ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1( @@ -142,7 +142,7 @@ void test_vloxseg7ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8( @@ -165,7 +165,7 @@ void test_vloxseg7ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // 
CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4( @@ -188,7 +188,7 @@ void test_vloxseg7ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2( @@ -211,7 +211,7 @@ void test_vloxseg7ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1( @@ -234,7 +234,7 @@ void test_vloxseg7ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4( @@ -257,7 +257,7 @@ void test_vloxseg7ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2( @@ -280,7 +280,7 @@ void test_vloxseg7ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1( @@ -303,7 +303,7 @@ void test_vloxseg7ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2( @@ -326,7 +326,7 @@ void test_vloxseg7ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, 
vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1( @@ -349,7 +349,7 @@ void test_vloxseg7ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1( @@ -372,7 +372,7 @@ void test_vloxseg7ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8( @@ -395,7 +395,7 @@ void test_vloxseg7ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4( @@ -418,7 +418,7 @@ void test_vloxseg7ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void 
// void test_vloxseg7ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2( @@ -441,7 +441,7 @@ void test_vloxseg7ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1( @@ -464,7 +464,7 @@ void test_vloxseg7ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4( @@ -487,7 +487,7 @@ void test_vloxseg7ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2( @@ -510,7 +510,7 @@ void test_vloxseg7ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1( @@ -533,7 +533,7 @@ void test_vloxseg7ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2( @@ -556,7 +556,7 @@ void test_vloxseg7ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1( @@ -579,7 +579,7 @@ void test_vloxseg7ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, 
const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1( @@ -602,7 +602,7 @@ void test_vloxseg7ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_m( @@ -625,7 +625,7 @@ void test_vloxseg7ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_m( @@ -648,7 +648,7 @@ void test_vloxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg7ei8_v_f16m1_m( @@ -671,7 +671,7 @@ void test_vloxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_m( @@ -694,7 +694,7 @@ void test_vloxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_m( @@ -717,7 +717,7 @@ void test_vloxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_m( @@ -740,7 +740,7 @@ void test_vloxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t 
*v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_m( @@ -763,7 +763,7 @@ void test_vloxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_m( @@ -786,7 +786,7 @@ void test_vloxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_m( @@ -809,7 +809,7 @@ void test_vloxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return 
__riscv_vloxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_m( @@ -832,7 +832,7 @@ void test_vloxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_m( @@ -855,7 +855,7 @@ void test_vloxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_m( @@ -878,7 +878,7 @@ void test_vloxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_m( @@ -901,7 +901,7 @@ void test_vloxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // 
void test_vloxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_m( @@ -924,7 +924,7 @@ void test_vloxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_m( @@ -947,7 +947,7 @@ void test_vloxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_m( @@ -970,7 +970,7 @@ void test_vloxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, 
v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_m( @@ -993,7 +993,7 @@ void test_vloxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_m( @@ -1016,7 +1016,7 @@ void test_vloxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_m( @@ -1039,7 +1039,7 @@ void test_vloxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_m( @@ -1062,7 +1062,7 @@ void test_vloxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, 
vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_m( @@ -1085,7 +1085,7 @@ void test_vloxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_m( @@ -1108,7 +1108,7 @@ void test_vloxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_m( @@ -1131,7 +1131,7 @@ void test_vloxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, 
const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_m( @@ -1154,7 +1154,7 @@ void test_vloxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_m( @@ -1177,7 +1177,7 @@ void test_vloxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_m( @@ -1200,6 +1200,6 @@ void test_vloxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c index f1d1e6d80d02..78adb7be7485 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei16.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2( @@ -54,7 +54,7 @@ void test_vloxseg8ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1( @@ -79,7 +79,7 @@ void test_vloxseg8ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + 
return __riscv_vloxseg8ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2( @@ -104,7 +104,7 @@ void test_vloxseg8ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1( @@ -129,7 +129,7 @@ void test_vloxseg8ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1( @@ -154,7 +154,7 @@ void test_vloxseg8ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8( @@ -179,7 +179,7 @@ void test_vloxseg8ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // 
CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4( @@ -204,7 +204,7 @@ void test_vloxseg8ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2( @@ -229,7 +229,7 @@ void test_vloxseg8ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1( @@ -254,7 +254,7 @@ void test_vloxseg8ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, 
bindex, vl); + return __riscv_vloxseg8ei16_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4( @@ -279,7 +279,7 @@ void test_vloxseg8ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2( @@ -304,7 +304,7 @@ void test_vloxseg8ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1( @@ -329,7 +329,7 @@ void test_vloxseg8ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2( @@ -354,7 +354,7 @@ void test_vloxseg8ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // 
CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1( @@ -379,7 +379,7 @@ void test_vloxseg8ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1( @@ -404,7 +404,7 @@ void test_vloxseg8ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8( @@ -429,7 +429,7 @@ void test_vloxseg8ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf8(v0, 
v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4( @@ -454,7 +454,7 @@ void test_vloxseg8ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2( @@ -479,7 +479,7 @@ void test_vloxseg8ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1( @@ -504,7 +504,7 @@ void test_vloxseg8ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4( @@ -529,7 +529,7 @@ void test_vloxseg8ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, 
vu // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2( @@ -554,7 +554,7 @@ void test_vloxseg8ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1( @@ -579,7 +579,7 @@ void test_vloxseg8ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2( @@ -604,7 +604,7 @@ void test_vloxseg8ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t 
bindex, size_t vl) { - return vloxseg8ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1( @@ -629,7 +629,7 @@ void test_vloxseg8ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1( @@ -654,7 +654,7 @@ void test_vloxseg8ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_m( @@ -679,7 +679,7 @@ void test_vloxseg8ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_m( @@ -704,7 +704,7 @@ void test_vloxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_m( @@ -729,7 +729,7 @@ void test_vloxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_m( @@ -754,7 +754,7 @@ void test_vloxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_m( @@ -779,7 +779,7 @@ void test_vloxseg8ei16_v_f32mf2_m(vfloat32mf2_t 
*v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_m( @@ -804,7 +804,7 @@ void test_vloxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_m( @@ -829,7 +829,7 @@ void test_vloxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_m( @@ -854,7 +854,7 @@ void test_vloxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t 
*v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_m( @@ -879,7 +879,7 @@ void test_vloxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_m( @@ -904,7 +904,7 @@ void test_vloxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_m( @@ -929,7 +929,7 @@ void test_vloxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, 
v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_m( @@ -954,7 +954,7 @@ void test_vloxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_m( @@ -979,7 +979,7 @@ void test_vloxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_m( @@ -1004,7 +1004,7 @@ void test_vloxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei16_v_i32m1_m( @@ -1029,7 +1029,7 @@ void test_vloxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_m( @@ -1054,7 +1054,7 @@ void test_vloxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_m( @@ -1079,7 +1079,7 @@ void test_vloxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_m( @@ -1104,7 +1104,7 @@ void test_vloxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void 
test_vloxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_m( @@ -1129,7 +1129,7 @@ void test_vloxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_m( @@ -1154,7 +1154,7 @@ void test_vloxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_m( @@ -1179,7 +1179,7 @@ void test_vloxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, 
vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_m( @@ -1204,7 +1204,7 @@ void test_vloxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_m( @@ -1229,7 +1229,7 @@ void test_vloxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_m( @@ -1254,7 +1254,7 @@ void test_vloxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, 
v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_m( @@ -1279,7 +1279,7 @@ void test_vloxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_m( @@ -1304,6 +1304,6 @@ void test_vloxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c index 82bb2d4aff18..4ff572396f2f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei32.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, 
vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2( @@ -54,7 +54,7 @@ void test_vloxseg8ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1( @@ -79,7 +79,7 @@ void test_vloxseg8ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2( @@ -104,7 +104,7 @@ void test_vloxseg8ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return 
__riscv_vloxseg8ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1( @@ -129,7 +129,7 @@ void test_vloxseg8ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1( @@ -154,7 +154,7 @@ void test_vloxseg8ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8( @@ -179,7 +179,7 @@ void test_vloxseg8ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4( @@ -204,7 +204,7 @@ void test_vloxseg8ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vloxseg8ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2( @@ -229,7 +229,7 @@ void test_vloxseg8ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1( @@ -254,7 +254,7 @@ void test_vloxseg8ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4( @@ -279,7 +279,7 @@ void test_vloxseg8ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return 
__riscv_vloxseg8ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2( @@ -304,7 +304,7 @@ void test_vloxseg8ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1( @@ -329,7 +329,7 @@ void test_vloxseg8ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2( @@ -354,7 +354,7 @@ void test_vloxseg8ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1( @@ -379,7 +379,7 @@ void test_vloxseg8ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void 
test_vloxseg8ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1( @@ -404,7 +404,7 @@ void test_vloxseg8ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8( @@ -429,7 +429,7 @@ void test_vloxseg8ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4( @@ -454,7 +454,7 @@ void test_vloxseg8ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, 
bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2( @@ -479,7 +479,7 @@ void test_vloxseg8ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1( @@ -504,7 +504,7 @@ void test_vloxseg8ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4( @@ -529,7 +529,7 @@ void test_vloxseg8ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2( @@ -554,7 +554,7 @@ void test_vloxseg8ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // 
CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1( @@ -579,7 +579,7 @@ void test_vloxseg8ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2( @@ -604,7 +604,7 @@ void test_vloxseg8ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1( @@ -629,7 +629,7 @@ void test_vloxseg8ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint32m1_t bindex, size_t 
vl) { - return vloxseg8ei32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1( @@ -654,7 +654,7 @@ void test_vloxseg8ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_m( @@ -679,7 +679,7 @@ void test_vloxseg8ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_m( @@ -704,7 +704,7 @@ void test_vloxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_m( @@ -729,7 +729,7 @@ void test_vloxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_m( @@ -754,7 +754,7 @@ void test_vloxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_m( @@ -779,7 +779,7 @@ void test_vloxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_m( @@ -804,7 +804,7 @@ void 
test_vloxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_m( @@ -829,7 +829,7 @@ void test_vloxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_m( @@ -854,7 +854,7 @@ void test_vloxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_m( @@ -879,7 +879,7 @@ void test_vloxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_m( @@ -904,7 +904,7 @@ void test_vloxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_m( @@ -929,7 +929,7 @@ void test_vloxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_m( @@ -954,7 +954,7 @@ void test_vloxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return 
vloxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_m( @@ -979,7 +979,7 @@ void test_vloxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_m( @@ -1004,7 +1004,7 @@ void test_vloxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_m( @@ -1029,7 +1029,7 @@ void test_vloxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_m( @@ -1054,7 +1054,7 @@ void test_vloxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_m( @@ -1079,7 +1079,7 @@ void test_vloxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_m( @@ -1104,7 +1104,7 @@ void test_vloxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_m( @@ -1129,7 +1129,7 @@ void test_vloxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // 
CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_m( @@ -1154,7 +1154,7 @@ void test_vloxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_m( @@ -1179,7 +1179,7 @@ void test_vloxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_m( @@ -1204,7 +1204,7 @@ void test_vloxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, 
vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_m( @@ -1229,7 +1229,7 @@ void test_vloxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_m( @@ -1254,7 +1254,7 @@ void test_vloxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_m( @@ -1279,7 +1279,7 @@ void test_vloxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return 
vloxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_m( @@ -1304,6 +1304,6 @@ void test_vloxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c index 8223bf58a311..d6174c19b876 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei64.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2( @@ -54,7 +54,7 @@ void test_vloxseg8ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, 
vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1( @@ -79,7 +79,7 @@ void test_vloxseg8ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2( @@ -104,7 +104,7 @@ void test_vloxseg8ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1( @@ -129,7 +129,7 @@ void test_vloxseg8ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return 
__riscv_vloxseg8ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1( @@ -154,7 +154,7 @@ void test_vloxseg8ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8( @@ -179,7 +179,7 @@ void test_vloxseg8ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4( @@ -204,7 +204,7 @@ void test_vloxseg8ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2( @@ -229,7 +229,7 @@ void test_vloxseg8ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vloxseg8ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1( @@ -254,7 +254,7 @@ void test_vloxseg8ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4( @@ -279,7 +279,7 @@ void test_vloxseg8ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2( @@ -304,7 +304,7 @@ void test_vloxseg8ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); 
+ return __riscv_vloxseg8ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1( @@ -329,7 +329,7 @@ void test_vloxseg8ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2( @@ -354,7 +354,7 @@ void test_vloxseg8ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1( @@ -379,7 +379,7 @@ void test_vloxseg8ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1( @@ -404,7 +404,7 @@ void test_vloxseg8ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vloxseg8ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8( @@ -429,7 +429,7 @@ void test_vloxseg8ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4( @@ -454,7 +454,7 @@ void test_vloxseg8ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2( @@ -479,7 +479,7 @@ void test_vloxseg8ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, 
base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1( @@ -504,7 +504,7 @@ void test_vloxseg8ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4( @@ -529,7 +529,7 @@ void test_vloxseg8ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2( @@ -554,7 +554,7 @@ void test_vloxseg8ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1( @@ -579,7 +579,7 @@ void test_vloxseg8ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t 
// CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2( @@ -604,7 +604,7 @@ void test_vloxseg8ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1( @@ -629,7 +629,7 @@ void test_vloxseg8ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1( @@ -654,7 +654,7 @@ void test_vloxseg8ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - 
return vloxseg8ei64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_m( @@ -679,7 +679,7 @@ void test_vloxseg8ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_m( @@ -704,7 +704,7 @@ void test_vloxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_m( @@ -729,7 +729,7 @@ void test_vloxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16m1_m(v0, 
v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_m( @@ -754,7 +754,7 @@ void test_vloxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_m( @@ -779,7 +779,7 @@ void test_vloxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_m( @@ -804,7 +804,7 @@ void test_vloxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_m( @@ -829,7 +829,7 @@ void 
test_vloxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_m( @@ -854,7 +854,7 @@ void test_vloxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_m( @@ -879,7 +879,7 @@ void test_vloxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_m( @@ -904,7 +904,7 @@ void test_vloxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t 
*v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_m( @@ -929,7 +929,7 @@ void test_vloxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_m( @@ -954,7 +954,7 @@ void test_vloxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_m( @@ -979,7 +979,7 @@ void test_vloxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i16m1_m(v0, 
v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_m( @@ -1004,7 +1004,7 @@ void test_vloxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_m( @@ -1029,7 +1029,7 @@ void test_vloxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_m( @@ -1054,7 +1054,7 @@ void test_vloxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei64_v_u8mf8_m( @@ -1079,7 +1079,7 @@ void test_vloxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_m( @@ -1104,7 +1104,7 @@ void test_vloxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_m( @@ -1129,7 +1129,7 @@ void test_vloxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_m( @@ -1154,7 +1154,7 @@ void test_vloxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void 
// void test_vloxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_m( @@ -1179,7 +1179,7 @@ void test_vloxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_m( @@ -1204,7 +1204,7 @@ void test_vloxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_m( @@ -1229,7 +1229,7 @@ void test_vloxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, 
vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_m( @@ -1254,7 +1254,7 @@ void test_vloxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_m( @@ -1279,7 +1279,7 @@ void test_vloxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_m( @@ -1304,6 +1304,6 @@ void test_vloxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u64m1_m(v0, v1, 
v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c index 94f22919068c..63d9c63145c2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vloxseg8ei8.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2( @@ -54,7 +54,7 @@ void test_vloxseg8ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1( @@ -79,7 +79,7 @@ void test_vloxseg8ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const 
_Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2( @@ -104,7 +104,7 @@ void test_vloxseg8ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1( @@ -129,7 +129,7 @@ void test_vloxseg8ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1( @@ -154,7 +154,7 @@ void test_vloxseg8ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei8_v_i8mf8( @@ -179,7 +179,7 @@ void test_vloxseg8ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4( @@ -204,7 +204,7 @@ void test_vloxseg8ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2( @@ -229,7 +229,7 @@ void test_vloxseg8ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1( @@ -254,7 +254,7 @@ void test_vloxseg8ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const 
int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4( @@ -279,7 +279,7 @@ void test_vloxseg8ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2( @@ -304,7 +304,7 @@ void test_vloxseg8ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1( @@ -329,7 +329,7 @@ void test_vloxseg8ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2( @@ -354,7 +354,7 @@ 
void test_vloxseg8ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1( @@ -379,7 +379,7 @@ void test_vloxseg8ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1( @@ -404,7 +404,7 @@ void test_vloxseg8ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8( @@ -429,7 +429,7 @@ void test_vloxseg8ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, 
vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4( @@ -454,7 +454,7 @@ void test_vloxseg8ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2( @@ -479,7 +479,7 @@ void test_vloxseg8ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1( @@ -504,7 +504,7 @@ void test_vloxseg8ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4( @@ -529,7 +529,7 @@ void 
test_vloxseg8ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2( @@ -554,7 +554,7 @@ void test_vloxseg8ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1( @@ -579,7 +579,7 @@ void test_vloxseg8ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2( @@ -604,7 +604,7 @@ void test_vloxseg8ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t 
*v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1( @@ -629,7 +629,7 @@ void test_vloxseg8ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1( @@ -654,7 +654,7 @@ void test_vloxseg8ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_m( @@ -679,7 +679,7 @@ void test_vloxseg8ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_m( @@ -704,7 +704,7 @@ void test_vloxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_m( @@ -729,7 +729,7 @@ void test_vloxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_m( @@ -754,7 +754,7 @@ void test_vloxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_m( @@ -779,7 +779,7 @@ void 
test_vloxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_m( @@ -804,7 +804,7 @@ void test_vloxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_m( @@ -829,7 +829,7 @@ void test_vloxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_m( @@ -854,7 +854,7 @@ void test_vloxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, 
vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_m( @@ -879,7 +879,7 @@ void test_vloxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_m( @@ -904,7 +904,7 @@ void test_vloxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_m( @@ -929,7 +929,7 @@ void test_vloxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf4_m(v0, v1, v2, 
v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_m( @@ -954,7 +954,7 @@ void test_vloxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_m( @@ -979,7 +979,7 @@ void test_vloxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_m( @@ -1004,7 +1004,7 @@ void test_vloxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei8_v_i32m1_m( @@ -1029,7 +1029,7 @@ void test_vloxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_m( @@ -1054,7 +1054,7 @@ void test_vloxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_m( @@ -1079,7 +1079,7 @@ void test_vloxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_m( @@ -1104,7 +1104,7 @@ void test_vloxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void 
test_vloxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_m( @@ -1129,7 +1129,7 @@ void test_vloxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_m( @@ -1154,7 +1154,7 @@ void test_vloxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_m( @@ -1179,7 +1179,7 @@ void test_vloxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t 
mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_m( @@ -1204,7 +1204,7 @@ void test_vloxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_m( @@ -1229,7 +1229,7 @@ void test_vloxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_m( @@ -1254,7 +1254,7 @@ void test_vloxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, 
vl); + return __riscv_vloxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_m( @@ -1279,7 +1279,7 @@ void test_vloxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_m( @@ -1304,6 +1304,6 @@ void test_vloxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse16.c index cbd86fe31c65..c11d1effcbb3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlse16_v_f16mf4(const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf4(base, bstride, vl); + return __riscv_vlse16_v_f16mf4(base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlse16_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vlse16_v_f16mf4(const _Float16 *base, ptrdiff_t bstride, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlse16_v_f16mf2(const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf2(base, bstride, vl); + return __riscv_vlse16_v_f16mf2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vlse16_v_f16mf2(const _Float16 *base, ptrdiff_t bstride, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlse16_v_f16m1(const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m1(base, bstride, vl); + return __riscv_vlse16_v_f16m1(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vlse16_v_f16m1(const _Float16 *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlse16_v_f16m2(const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m2(base, bstride, vl); + return __riscv_vlse16_v_f16m2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vlse16_v_f16m2(const _Float16 *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlse16_v_f16m4(const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m4(base, bstride, vl); + return __riscv_vlse16_v_f16m4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vlse16_v_f16m4(const _Float16 *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlse16_v_f16m8(const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m8(base, bstride, vl); + return __riscv_vlse16_v_f16m8(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4( @@ -67,7 +67,7 @@ vfloat16m8_t test_vlse16_v_f16m8(const _Float16 *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf4_t test_vlse16_v_i16mf4(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16mf4(base, bstride, vl); + return __riscv_vlse16_v_i16mf4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2( @@ -76,7 +76,7 @@ vint16mf4_t test_vlse16_v_i16mf4(const int16_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlse16_v_i16mf2(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16mf2(base, bstride, vl); + return __riscv_vlse16_v_i16mf2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m1( @@ -85,7 +85,7 @@ vint16mf2_t test_vlse16_v_i16mf2(const int16_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlse16_v_i16m1(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m1(base, bstride, vl); + return __riscv_vlse16_v_i16m1(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m2( @@ -94,7 +94,7 @@ vint16m1_t test_vlse16_v_i16m1(const int16_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlse16_v_i16m2(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m2(base, bstride, vl); + return __riscv_vlse16_v_i16m2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m4( @@ -103,7 +103,7 @@ vint16m2_t test_vlse16_v_i16m2(const int16_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlse16_v_i16m4(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m4(base, bstride, vl); + return __riscv_vlse16_v_i16m4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m8( @@ -112,7 +112,7 @@ vint16m4_t test_vlse16_v_i16m4(const int16_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlse16_v_i16m8(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m8(base, bstride, vl); + return __riscv_vlse16_v_i16m8(base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlse16_v_u16mf4( @@ -121,7 +121,7 @@ vint16m8_t test_vlse16_v_i16m8(const int16_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlse16_v_u16mf4(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16mf4(base, bstride, vl); + return __riscv_vlse16_v_u16mf4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2( @@ -130,7 +130,7 @@ vuint16mf4_t test_vlse16_v_u16mf4(const uint16_t *base, ptrdiff_t bstride, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlse16_v_u16mf2(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16mf2(base, bstride, vl); + return __riscv_vlse16_v_u16mf2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m1( @@ -139,7 +139,7 @@ vuint16mf2_t test_vlse16_v_u16mf2(const uint16_t *base, ptrdiff_t bstride, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlse16_v_u16m1(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m1(base, bstride, vl); + return __riscv_vlse16_v_u16m1(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m2( @@ -148,7 +148,7 @@ vuint16m1_t test_vlse16_v_u16m1(const uint16_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlse16_v_u16m2(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m2(base, bstride, vl); + return __riscv_vlse16_v_u16m2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m4( @@ -157,7 +157,7 @@ vuint16m2_t test_vlse16_v_u16m2(const uint16_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlse16_v_u16m4(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m4(base, bstride, vl); + return __riscv_vlse16_v_u16m4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m8( @@ -166,7 +166,7 @@ vuint16m4_t test_vlse16_v_u16m4(const uint16_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m8_t test_vlse16_v_u16m8(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m8(base, bstride, vl); + return __riscv_vlse16_v_u16m8(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_m( @@ -175,7 +175,7 @@ vuint16m8_t test_vlse16_v_u16m8(const uint16_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf4_m(mask, base, bstride, vl); + return __riscv_vlse16_v_f16mf4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_m( @@ -184,7 +184,7 @@ vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, ptrdi // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf2_m(mask, base, bstride, vl); + return __riscv_vlse16_v_f16mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m1_m( @@ -193,7 +193,7 @@ vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, ptrdi // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m1_m(mask, base, bstride, vl); + return __riscv_vlse16_v_f16m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m2_m( @@ -202,7 +202,7 @@ vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t mask, const _Float16 *base, ptrdiff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m2_m(mask, base, bstride, vl); + return __riscv_vlse16_v_f16m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m4_m( @@ -211,7 +211,7 @@ vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t mask, const _Float16 *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m4_m(mask, base, bstride, vl); + return __riscv_vlse16_v_f16m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m8_m( @@ -220,7 +220,7 @@ vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t mask, const _Float16 *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m8_m(mask, base, bstride, vl); + return __riscv_vlse16_v_f16m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m( @@ -229,7 +229,7 @@ vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t mask, const _Float16 *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16mf4_m(mask, base, bstride, vl); + return __riscv_vlse16_v_i16mf4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m( @@ -238,7 +238,7 @@ vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, const int16_t *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16mf2_m(mask, base, bstride, vl); + return __riscv_vlse16_v_i16mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m( @@ -247,7 +247,7 @@ vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, const int16_t *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m1_m(mask, base, bstride, vl); + return __riscv_vlse16_v_i16m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m( @@ -256,7 +256,7 @@ vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, const int16_t *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m2_m(mask, base, bstride, vl); + return __riscv_vlse16_v_i16m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m( @@ -265,7 +265,7 @@ vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, const int16_t *base, ptrdiff_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m4_m(mask, base, bstride, vl); + return __riscv_vlse16_v_i16m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m( @@ -274,7 +274,7 @@ vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, const int16_t *base, ptrdiff_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m8_m(mask, base, bstride, vl); + return __riscv_vlse16_v_i16m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m( @@ -283,7 +283,7 @@ vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, const int16_t *base, ptrdiff_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16mf4_m(mask, base, bstride, vl); + return __riscv_vlse16_v_u16mf4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m( @@ -292,7 +292,7 @@ vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, ptrdif // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16mf2_m(mask, base, bstride, vl); + return __riscv_vlse16_v_u16mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m( @@ -301,7 +301,7 @@ vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, ptrdif // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m1_m(mask, base, bstride, vl); + return __riscv_vlse16_v_u16m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m( @@ -310,7 +310,7 @@ vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, const uint16_t *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m2_m(mask, base, bstride, vl); + return __riscv_vlse16_v_u16m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m( @@ -319,7 +319,7 @@ vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, const uint16_t *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m4_m(mask, base, bstride, vl); + return __riscv_vlse16_v_u16m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m( @@ -328,6 +328,6 @@ vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, const uint16_t *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m8_m(mask, base, bstride, vl); + return __riscv_vlse16_v_u16m8_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse32.c index d1f36d2e11b9..3f5b7763754a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlse32_v_f32mf2(const float *base, ptrdiff_t bstride, size_t vl) { - return 
vlse32_v_f32mf2(base, bstride, vl); + return __riscv_vlse32_v_f32mf2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m1( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2(const float *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlse32_v_f32m1(const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m1(base, bstride, vl); + return __riscv_vlse32_v_f32m1(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m2( @@ -31,7 +31,7 @@ vfloat32m1_t test_vlse32_v_f32m1(const float *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlse32_v_f32m2(const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m2(base, bstride, vl); + return __riscv_vlse32_v_f32m2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m4( @@ -40,7 +40,7 @@ vfloat32m2_t test_vlse32_v_f32m2(const float *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlse32_v_f32m4(const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m4(base, bstride, vl); + return __riscv_vlse32_v_f32m4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m8( @@ -49,7 +49,7 @@ vfloat32m4_t test_vlse32_v_f32m4(const float *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlse32_v_f32m8(const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m8(base, bstride, vl); + return __riscv_vlse32_v_f32m8(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2( @@ -58,7 +58,7 @@ vfloat32m8_t test_vlse32_v_f32m8(const float *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlse32_v_i32mf2(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32mf2(base, bstride, vl); + return __riscv_vlse32_v_i32mf2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m1( @@ -67,7 +67,7 @@ vint32mf2_t test_vlse32_v_i32mf2(const 
int32_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlse32_v_i32m1(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m1(base, bstride, vl); + return __riscv_vlse32_v_i32m1(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m2( @@ -76,7 +76,7 @@ vint32m1_t test_vlse32_v_i32m1(const int32_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlse32_v_i32m2(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m2(base, bstride, vl); + return __riscv_vlse32_v_i32m2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m4( @@ -85,7 +85,7 @@ vint32m2_t test_vlse32_v_i32m2(const int32_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlse32_v_i32m4(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m4(base, bstride, vl); + return __riscv_vlse32_v_i32m4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m8( @@ -94,7 +94,7 @@ vint32m4_t test_vlse32_v_i32m4(const int32_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlse32_v_i32m8(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m8(base, bstride, vl); + return __riscv_vlse32_v_i32m8(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2( @@ -103,7 +103,7 @@ vint32m8_t test_vlse32_v_i32m8(const int32_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlse32_v_u32mf2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32mf2(base, bstride, vl); + return __riscv_vlse32_v_u32mf2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m1( @@ -112,7 +112,7 @@ vuint32mf2_t test_vlse32_v_u32mf2(const uint32_t *base, ptrdiff_t bstride, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlse32_v_u32m1(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlse32_v_u32m1(base, bstride, vl); + return __riscv_vlse32_v_u32m1(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m2( @@ -121,7 +121,7 @@ vuint32m1_t test_vlse32_v_u32m1(const uint32_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlse32_v_u32m2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m2(base, bstride, vl); + return __riscv_vlse32_v_u32m2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m4( @@ -130,7 +130,7 @@ vuint32m2_t test_vlse32_v_u32m2(const uint32_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlse32_v_u32m4(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m4(base, bstride, vl); + return __riscv_vlse32_v_u32m4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m8( @@ -139,7 +139,7 @@ vuint32m4_t test_vlse32_v_u32m4(const uint32_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlse32_v_u32m8(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m8(base, bstride, vl); + return __riscv_vlse32_v_u32m8(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m( @@ -148,7 +148,7 @@ vuint32m8_t test_vlse32_v_u32m8(const uint32_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32mf2_m(mask, base, bstride, vl); + return __riscv_vlse32_v_f32mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m( @@ -157,7 +157,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, const float *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m1_m(mask, base, bstride, vl); + return __riscv_vlse32_v_f32m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlse32_v_f32m2_m( @@ -166,7 +166,7 @@ vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, const float *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m2_m(mask, base, bstride, vl); + return __riscv_vlse32_v_f32m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m( @@ -175,7 +175,7 @@ vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, const float *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m4_m(mask, base, bstride, vl); + return __riscv_vlse32_v_f32m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m( @@ -184,7 +184,7 @@ vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, const float *base, ptrdiff_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m8_m(mask, base, bstride, vl); + return __riscv_vlse32_v_f32m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m( @@ -193,7 +193,7 @@ vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, const float *base, ptrdiff_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32mf2_m(mask, base, bstride, vl); + return __riscv_vlse32_v_i32mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m( @@ -202,7 +202,7 @@ vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, const int32_t *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m1_m(mask, base, bstride, vl); + return __riscv_vlse32_v_i32m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlse32_v_i32m2_m( @@ -211,7 +211,7 @@ vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, const int32_t *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m2_m(mask, base, bstride, vl); + return __riscv_vlse32_v_i32m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m( @@ -220,7 +220,7 @@ vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, const int32_t *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m4_m(mask, base, bstride, vl); + return __riscv_vlse32_v_i32m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m( @@ -229,7 +229,7 @@ vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, const int32_t *base, ptrdiff_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m8_m(mask, base, bstride, vl); + return __riscv_vlse32_v_i32m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m( @@ -238,7 +238,7 @@ vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, const int32_t *base, ptrdiff_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32mf2_m(mask, base, bstride, vl); + return __riscv_vlse32_v_u32mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m( @@ -247,7 +247,7 @@ vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, ptrdif // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m1_m(mask, base, bstride, vl); + return __riscv_vlse32_v_u32m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlse32_v_u32m2_m( @@ -256,7 +256,7 @@ vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, const uint32_t *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m2_m(mask, base, bstride, vl); + return __riscv_vlse32_v_u32m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m( @@ -265,7 +265,7 @@ vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, const uint32_t *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m4_m(mask, base, bstride, vl); + return __riscv_vlse32_v_u32m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m( @@ -274,6 +274,6 @@ vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, const uint32_t *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m8_m(mask, base, bstride, vl); + return __riscv_vlse32_v_u32m8_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse64.c index 7cb6a6223772..2ea5a9b65537 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlse64_v_f64m1(const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m1(base, bstride, vl); + return __riscv_vlse64_v_f64m1(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m2( @@ -22,7 +22,7 @@ vfloat64m1_t test_vlse64_v_f64m1(const double *base, ptrdiff_t bstride, size_t v // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat64m2_t test_vlse64_v_f64m2(const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m2(base, bstride, vl); + return __riscv_vlse64_v_f64m2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m4( @@ -31,7 +31,7 @@ vfloat64m2_t test_vlse64_v_f64m2(const double *base, ptrdiff_t bstride, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlse64_v_f64m4(const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m4(base, bstride, vl); + return __riscv_vlse64_v_f64m4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m8( @@ -40,7 +40,7 @@ vfloat64m4_t test_vlse64_v_f64m4(const double *base, ptrdiff_t bstride, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlse64_v_f64m8(const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m8(base, bstride, vl); + return __riscv_vlse64_v_f64m8(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m1( @@ -49,7 +49,7 @@ vfloat64m8_t test_vlse64_v_f64m8(const double *base, ptrdiff_t bstride, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlse64_v_i64m1(const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m1(base, bstride, vl); + return __riscv_vlse64_v_i64m1(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m2( @@ -58,7 +58,7 @@ vint64m1_t test_vlse64_v_i64m1(const int64_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlse64_v_i64m2(const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m2(base, bstride, vl); + return __riscv_vlse64_v_i64m2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m4( @@ -67,7 +67,7 @@ vint64m2_t test_vlse64_v_i64m2(const int64_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlse64_v_i64m4(const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m4(base, bstride, vl); + return __riscv_vlse64_v_i64m4(base, bstride, vl); 
} // CHECK-RV64-LABEL: @test_vlse64_v_i64m8( @@ -76,7 +76,7 @@ vint64m4_t test_vlse64_v_i64m4(const int64_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlse64_v_i64m8(const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m8(base, bstride, vl); + return __riscv_vlse64_v_i64m8(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m1( @@ -85,7 +85,7 @@ vint64m8_t test_vlse64_v_i64m8(const int64_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlse64_v_u64m1(const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m1(base, bstride, vl); + return __riscv_vlse64_v_u64m1(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m2( @@ -94,7 +94,7 @@ vuint64m1_t test_vlse64_v_u64m1(const uint64_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlse64_v_u64m2(const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m2(base, bstride, vl); + return __riscv_vlse64_v_u64m2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m4( @@ -103,7 +103,7 @@ vuint64m2_t test_vlse64_v_u64m2(const uint64_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlse64_v_u64m4(const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m4(base, bstride, vl); + return __riscv_vlse64_v_u64m4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m8( @@ -112,7 +112,7 @@ vuint64m4_t test_vlse64_v_u64m4(const uint64_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlse64_v_u64m8(const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m8(base, bstride, vl); + return __riscv_vlse64_v_u64m8(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m( @@ -121,7 +121,7 @@ vuint64m8_t test_vlse64_v_u64m8(const uint64_t *base, ptrdiff_t bstride, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m1_m(mask, base, bstride, vl); + return __riscv_vlse64_v_f64m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m( @@ -130,7 +130,7 @@ vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, const double *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m2_m(mask, base, bstride, vl); + return __riscv_vlse64_v_f64m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m( @@ -139,7 +139,7 @@ vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, const double *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m4_m(mask, base, bstride, vl); + return __riscv_vlse64_v_f64m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m( @@ -148,7 +148,7 @@ vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, const double *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m8_m(mask, base, bstride, vl); + return __riscv_vlse64_v_f64m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m( @@ -157,7 +157,7 @@ vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, const double *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m1_m(mask, base, bstride, vl); + return __riscv_vlse64_v_i64m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m( @@ -166,7 +166,7 @@ vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, const int64_t *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m2_m(mask, base, bstride, vl); + return __riscv_vlse64_v_i64m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m( @@ -175,7 +175,7 @@ vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, const int64_t *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m4_m(mask, base, bstride, vl); + return __riscv_vlse64_v_i64m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m( @@ -184,7 +184,7 @@ vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, const int64_t *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m8_m(mask, base, bstride, vl); + return __riscv_vlse64_v_i64m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m( @@ -193,7 +193,7 @@ vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, const int64_t *base, ptrdiff_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m1_m(mask, base, bstride, vl); + return __riscv_vlse64_v_u64m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m( @@ -202,7 +202,7 @@ vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, const uint64_t *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m2_m(mask, base, bstride, vl); + return __riscv_vlse64_v_u64m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m( @@ -211,7 +211,7 @@ vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, const uint64_t *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m4_m(mask, base, bstride, vl); + return __riscv_vlse64_v_u64m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m( @@ -220,6 +220,6 @@ vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, const uint64_t *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m8_m(mask, base, bstride, vl); + return __riscv_vlse64_v_u64m8_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse8.c index 4ad9094b9a99..490623a500f2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlse8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlse8_v_i8mf8(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf8(base, bstride, vl); + return __riscv_vlse8_v_i8mf8(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4( @@ -21,7 +21,7 @@ vint8mf8_t test_vlse8_v_i8mf8(const int8_t *base, ptrdiff_t bstride, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlse8_v_i8mf4(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf4(base, bstride, vl); + return __riscv_vlse8_v_i8mf4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2( @@ -30,7 +30,7 @@ vint8mf4_t test_vlse8_v_i8mf4(const int8_t *base, ptrdiff_t bstride, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlse8_v_i8mf2(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf2(base, bstride, vl); + return __riscv_vlse8_v_i8mf2(base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlse8_v_i8m1( @@ -39,7 +39,7 @@ vint8mf2_t test_vlse8_v_i8mf2(const int8_t *base, ptrdiff_t bstride, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlse8_v_i8m1(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m1(base, bstride, vl); + return __riscv_vlse8_v_i8m1(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m2( @@ -48,7 +48,7 @@ vint8m1_t test_vlse8_v_i8m1(const int8_t *base, ptrdiff_t bstride, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlse8_v_i8m2(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m2(base, bstride, vl); + return __riscv_vlse8_v_i8m2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m4( @@ -57,7 +57,7 @@ vint8m2_t test_vlse8_v_i8m2(const int8_t *base, ptrdiff_t bstride, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlse8_v_i8m4(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m4(base, bstride, vl); + return __riscv_vlse8_v_i8m4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m8( @@ -66,7 +66,7 @@ vint8m4_t test_vlse8_v_i8m4(const int8_t *base, ptrdiff_t bstride, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlse8_v_i8m8(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m8(base, bstride, vl); + return __riscv_vlse8_v_i8m8(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8( @@ -75,7 +75,7 @@ vint8m8_t test_vlse8_v_i8m8(const int8_t *base, ptrdiff_t bstride, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlse8_v_u8mf8(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf8(base, bstride, vl); + return __riscv_vlse8_v_u8mf8(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4( @@ -84,7 +84,7 @@ vuint8mf8_t test_vlse8_v_u8mf8(const uint8_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlse8_v_u8mf4(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - 
return vlse8_v_u8mf4(base, bstride, vl); + return __riscv_vlse8_v_u8mf4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2( @@ -93,7 +93,7 @@ vuint8mf4_t test_vlse8_v_u8mf4(const uint8_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlse8_v_u8mf2(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf2(base, bstride, vl); + return __riscv_vlse8_v_u8mf2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m1( @@ -102,7 +102,7 @@ vuint8mf2_t test_vlse8_v_u8mf2(const uint8_t *base, ptrdiff_t bstride, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlse8_v_u8m1(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m1(base, bstride, vl); + return __riscv_vlse8_v_u8m1(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m2( @@ -111,7 +111,7 @@ vuint8m1_t test_vlse8_v_u8m1(const uint8_t *base, ptrdiff_t bstride, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlse8_v_u8m2(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m2(base, bstride, vl); + return __riscv_vlse8_v_u8m2(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m4( @@ -120,7 +120,7 @@ vuint8m2_t test_vlse8_v_u8m2(const uint8_t *base, ptrdiff_t bstride, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlse8_v_u8m4(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m4(base, bstride, vl); + return __riscv_vlse8_v_u8m4(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m8( @@ -129,7 +129,7 @@ vuint8m4_t test_vlse8_v_u8m4(const uint8_t *base, ptrdiff_t bstride, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlse8_v_u8m8(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m8(base, bstride, vl); + return __riscv_vlse8_v_u8m8(base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_m( @@ -138,7 +138,7 @@ vuint8m8_t test_vlse8_v_u8m8(const uint8_t *base, ptrdiff_t bstride, 
size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf8_m(mask, base, bstride, vl); + return __riscv_vlse8_v_i8mf8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m( @@ -147,7 +147,7 @@ vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf4_m(mask, base, bstride, vl); + return __riscv_vlse8_v_i8mf4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m( @@ -156,7 +156,7 @@ vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf2_m(mask, base, bstride, vl); + return __riscv_vlse8_v_i8mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m( @@ -165,7 +165,7 @@ vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m1_m(mask, base, bstride, vl); + return __riscv_vlse8_v_i8m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m( @@ -174,7 +174,7 @@ vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstri // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m2_m(mask, base, bstride, vl); + return __riscv_vlse8_v_i8m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m( @@ -183,7 +183,7 @@ vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstri // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m4_m(mask, base, bstride, vl); + return __riscv_vlse8_v_i8m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m( @@ -192,7 +192,7 @@ vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstri // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m8_m(mask, base, bstride, vl); + return __riscv_vlse8_v_i8m8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m( @@ -201,7 +201,7 @@ vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, const int8_t *base, ptrdiff_t bstri // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf8_m(mask, base, bstride, vl); + return __riscv_vlse8_v_u8mf8_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m( @@ -210,7 +210,7 @@ vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf4_m(mask, base, bstride, vl); + return __riscv_vlse8_v_u8mf4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m( @@ -219,7 +219,7 @@ vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf2_m(mask, base, bstride, vl); + return __riscv_vlse8_v_u8mf2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m( @@ -228,7 +228,7 @@ vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t 
mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m1_m(mask, base, bstride, vl); + return __riscv_vlse8_v_u8m1_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m( @@ -237,7 +237,7 @@ vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bst // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m2_m(mask, base, bstride, vl); + return __riscv_vlse8_v_u8m2_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m( @@ -246,7 +246,7 @@ vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bst // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m4_m(mask, base, bstride, vl); + return __riscv_vlse8_v_u8m4_m(mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m( @@ -255,6 +255,6 @@ vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bst // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m8_m(mask, base, bstride, vl); + return __riscv_vlse8_v_u8m8_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16.c index a63b08eb49e6..2cb4e783edd6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf4(v0, v1, base, vl); + return 
__riscv_vlseg2e16_v_f16mf4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2( @@ -30,7 +30,7 @@ void test_vlseg2e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf2(v0, v1, base, vl); + return __riscv_vlseg2e16_v_f16mf2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1( @@ -43,7 +43,7 @@ void test_vlseg2e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m1(v0, v1, base, vl); + return __riscv_vlseg2e16_v_f16m1(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2( @@ -56,7 +56,7 @@ void test_vlseg2e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 * // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m2(v0, v1, base, vl); + return __riscv_vlseg2e16_v_f16m2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4( @@ -69,7 +69,7 @@ void test_vlseg2e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 * // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m4(v0, v1, base, vl); + return __riscv_vlseg2e16_v_f16m4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4( @@ -82,7 +82,7 @@ void test_vlseg2e16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 * // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf4(v0, v1, base, vl); + return __riscv_vlseg2e16_v_i16mf4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2( @@ 
-95,7 +95,7 @@ void test_vlseg2e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf2(v0, v1, base, vl); + return __riscv_vlseg2e16_v_i16mf2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1( @@ -108,7 +108,7 @@ void test_vlseg2e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m1(v0, v1, base, vl); + return __riscv_vlseg2e16_v_i16m1(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2( @@ -121,7 +121,7 @@ void test_vlseg2e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m2(v0, v1, base, vl); + return __riscv_vlseg2e16_v_i16m2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4( @@ -134,7 +134,7 @@ void test_vlseg2e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m4(v0, v1, base, vl); + return __riscv_vlseg2e16_v_i16m4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4( @@ -147,7 +147,7 @@ void test_vlseg2e16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf4(v0, v1, base, vl); + return __riscv_vlseg2e16_v_u16mf4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2( @@ -160,7 +160,7 @@ void test_vlseg2e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t // 
CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf2(v0, v1, base, vl); + return __riscv_vlseg2e16_v_u16mf2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1( @@ -173,7 +173,7 @@ void test_vlseg2e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m1(v0, v1, base, vl); + return __riscv_vlseg2e16_v_u16m1(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2( @@ -186,7 +186,7 @@ void test_vlseg2e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m2(v0, v1, base, vl); + return __riscv_vlseg2e16_v_u16m2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4( @@ -199,7 +199,7 @@ void test_vlseg2e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m4(v0, v1, base, vl); + return __riscv_vlseg2e16_v_u16m4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_m( @@ -212,7 +212,7 @@ void test_vlseg2e16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_f16mf4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_m( @@ -225,7 +225,7 @@ void test_vlseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_f16mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_m( @@ -238,7 +238,7 @@ void test_vlseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m1_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_f16m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_m( @@ -251,7 +251,7 @@ void test_vlseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_f16m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_m( @@ -264,7 +264,7 @@ void test_vlseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_f16m4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_m( @@ -277,7 +277,7 @@ void test_vlseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_i16mf4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_m( @@ -290,7 +290,7 @@ void 
test_vlseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_i16mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_m( @@ -303,7 +303,7 @@ void test_vlseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m1_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_i16m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_m( @@ -316,7 +316,7 @@ void test_vlseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, co // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_i16m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m( @@ -329,7 +329,7 @@ void test_vlseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, con // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_i16m4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m( @@ -342,7 +342,7 @@ void test_vlseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, con // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_u16mf4_m(v0, v1, mask, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m( @@ -355,7 +355,7 @@ void test_vlseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_u16mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m( @@ -368,7 +368,7 @@ void test_vlseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m1_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_u16m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_m( @@ -381,7 +381,7 @@ void test_vlseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_u16m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_m( @@ -394,6 +394,6 @@ void test_vlseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e16_v_u16m4_m(v0, v1, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c index 310bbde238f5..697e4edb6c8c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c 
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e16ff.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16mf4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2( @@ -34,7 +34,7 @@ void test_vlseg2e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Floa // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16mf2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1( @@ -49,7 +49,7 @@ void test_vlseg2e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Floa // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m1(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m1(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2( @@ -64,7 +64,7 @@ void test_vlseg2e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4( @@ -79,7 +79,7 @@ void test_vlseg2e16ff_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m4(v0, v1, base, 
new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4( @@ -94,7 +94,7 @@ void test_vlseg2e16ff_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16mf4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2( @@ -109,7 +109,7 @@ void test_vlseg2e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t * // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16mf2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1( @@ -124,7 +124,7 @@ void test_vlseg2e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t * // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m1(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m1(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2( @@ -139,7 +139,7 @@ void test_vlseg2e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *bas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4( @@ -154,7 +154,7 @@ void test_vlseg2e16ff_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *bas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const 
int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4( @@ -169,7 +169,7 @@ void test_vlseg2e16ff_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *bas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16mf4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16mf4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2( @@ -184,7 +184,7 @@ void test_vlseg2e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16mf2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16mf2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1( @@ -199,7 +199,7 @@ void test_vlseg2e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m1(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m1(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2( @@ -214,7 +214,7 @@ void test_vlseg2e16ff_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t * // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4( @@ -229,7 +229,7 @@ void test_vlseg2e16ff_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t * 
// CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_m( @@ -244,7 +244,7 @@ void test_vlseg2e16ff_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t * // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16mf4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_m( @@ -259,7 +259,7 @@ void test_vlseg2e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_m( @@ -274,7 +274,7 @@ void test_vlseg2e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m1_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_m( @@ -289,7 +289,7 @@ void test_vlseg2e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return 
vlseg2e16ff_v_f16m2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_m( @@ -304,7 +304,7 @@ void test_vlseg2e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m( @@ -319,7 +319,7 @@ void test_vlseg2e16ff_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16mf4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m( @@ -334,7 +334,7 @@ void test_vlseg2e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m( @@ -349,7 +349,7 @@ void test_vlseg2e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m1_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_m( 
@@ -364,7 +364,7 @@ void test_vlseg2e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_m( @@ -379,7 +379,7 @@ void test_vlseg2e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m( @@ -394,7 +394,7 @@ void test_vlseg2e16ff_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16mf4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16mf4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m( @@ -409,7 +409,7 @@ void test_vlseg2e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16mf2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m( @@ -424,7 +424,7 @@ void test_vlseg2e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m1_m(vuint16m1_t 
*v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m1_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m( @@ -439,7 +439,7 @@ void test_vlseg2e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m( @@ -454,6 +454,6 @@ void test_vlseg2e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m4_m(v0, v1, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32.c index c57c20aa7900..923693931c10 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, size_t vl) { - return vlseg2e32_v_f32mf2(v0, v1, base, vl); + return __riscv_vlseg2e32_v_f32mf2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1( @@ -30,7 +30,7 @@ void test_vlseg2e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float * // CHECK-RV64-NEXT: ret void // 
void test_vlseg2e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, size_t vl) { - return vlseg2e32_v_f32m1(v0, v1, base, vl); + return __riscv_vlseg2e32_v_f32m1(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2( @@ -43,7 +43,7 @@ void test_vlseg2e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *bas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, size_t vl) { - return vlseg2e32_v_f32m2(v0, v1, base, vl); + return __riscv_vlseg2e32_v_f32m2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4( @@ -56,7 +56,7 @@ void test_vlseg2e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *bas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, size_t vl) { - return vlseg2e32_v_f32m4(v0, v1, base, vl); + return __riscv_vlseg2e32_v_f32m4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2( @@ -69,7 +69,7 @@ void test_vlseg2e32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *bas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32mf2(v0, v1, base, vl); + return __riscv_vlseg2e32_v_i32mf2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1( @@ -82,7 +82,7 @@ void test_vlseg2e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m1(v0, v1, base, vl); + return __riscv_vlseg2e32_v_i32m1(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2( @@ -95,7 +95,7 @@ void test_vlseg2e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, size_t vl) { - return 
vlseg2e32_v_i32m2(v0, v1, base, vl); + return __riscv_vlseg2e32_v_i32m2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4( @@ -108,7 +108,7 @@ void test_vlseg2e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m4(v0, v1, base, vl); + return __riscv_vlseg2e32_v_i32m4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2( @@ -121,7 +121,7 @@ void test_vlseg2e32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32mf2(v0, v1, base, vl); + return __riscv_vlseg2e32_v_u32mf2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1( @@ -134,7 +134,7 @@ void test_vlseg2e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m1(v0, v1, base, vl); + return __riscv_vlseg2e32_v_u32m1(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2( @@ -147,7 +147,7 @@ void test_vlseg2e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m2(v0, v1, base, vl); + return __riscv_vlseg2e32_v_u32m2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4( @@ -160,7 +160,7 @@ void test_vlseg2e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m4(v0, v1, base, vl); + return __riscv_vlseg2e32_v_u32m4(v0, v1, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_m( @@ -173,7 +173,7 @@ void test_vlseg2e32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, size_t vl) { - return vlseg2e32_v_f32mf2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e32_v_f32mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m( @@ -186,7 +186,7 @@ void test_vlseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, size_t vl) { - return vlseg2e32_v_f32m1_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e32_v_f32m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m( @@ -199,7 +199,7 @@ void test_vlseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, size_t vl) { - return vlseg2e32_v_f32m2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e32_v_f32m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_m( @@ -212,7 +212,7 @@ void test_vlseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, size_t vl) { - return vlseg2e32_v_f32m4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e32_v_f32m4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m( @@ -225,7 +225,7 @@ void test_vlseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32mf2_m(v0, v1, mask, 
base, vl); + return __riscv_vlseg2e32_v_i32mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m( @@ -238,7 +238,7 @@ void test_vlseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m1_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e32_v_i32m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m( @@ -251,7 +251,7 @@ void test_vlseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, co // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e32_v_i32m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m( @@ -264,7 +264,7 @@ void test_vlseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, co // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e32_v_i32m4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m( @@ -277,7 +277,7 @@ void test_vlseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, con // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32mf2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e32_v_u32mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m( @@ -290,7 +290,7 @@ void test_vlseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const 
uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m1_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e32_v_u32m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m( @@ -303,7 +303,7 @@ void test_vlseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e32_v_u32m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m( @@ -316,6 +316,6 @@ void test_vlseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e32_v_u32m4_m(v0, v1, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c index 9b58f40a1578..f6805a5cd03a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32ff.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32mf2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32mf2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1( @@ -34,7 +34,7 @@ void test_vlseg2e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, size_t *new_vl, size_t vl) { 
- return vlseg2e32ff_v_f32m1(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m1(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2( @@ -49,7 +49,7 @@ void test_vlseg2e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *b // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4( @@ -64,7 +64,7 @@ void test_vlseg2e32ff_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *b // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2( @@ -79,7 +79,7 @@ void test_vlseg2e32ff_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *b // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32mf2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32mf2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1( @@ -94,7 +94,7 @@ void test_vlseg2e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t * // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m1(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m1(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2( @@ -109,7 +109,7 @@ void test_vlseg2e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *bas // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e32ff_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4( @@ -124,7 +124,7 @@ void test_vlseg2e32ff_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *bas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2( @@ -139,7 +139,7 @@ void test_vlseg2e32ff_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *bas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32mf2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32mf2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1( @@ -154,7 +154,7 @@ void test_vlseg2e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m1(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m1(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2( @@ -169,7 +169,7 @@ void test_vlseg2e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t * // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4( @@ -184,7 +184,7 @@ void 
test_vlseg2e32ff_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t * // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_m( @@ -199,7 +199,7 @@ void test_vlseg2e32ff_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t * // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32mf2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_m( @@ -214,7 +214,7 @@ void test_vlseg2e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m1_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m( @@ -229,7 +229,7 @@ void test_vlseg2e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m( @@ -244,7 +244,7 @@ void test_vlseg2e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, 
size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_m( @@ -259,7 +259,7 @@ void test_vlseg2e32ff_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32mf2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_m( @@ -274,7 +274,7 @@ void test_vlseg2e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m1_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_m( @@ -289,7 +289,7 @@ void test_vlseg2e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_m( @@ -304,7 +304,7 @@ void test_vlseg2e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg2e32ff_v_u32mf2_m( @@ -319,7 +319,7 @@ void test_vlseg2e32ff_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32mf2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m( @@ -334,7 +334,7 @@ void test_vlseg2e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m1_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m( @@ -349,7 +349,7 @@ void test_vlseg2e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_m( @@ -364,6 +364,6 @@ void test_vlseg2e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m4_m(v0, v1, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64.c 
index 00623e573db2..e1f494306372 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, size_t vl) { - return vlseg2e64_v_f64m1(v0, v1, base, vl); + return __riscv_vlseg2e64_v_f64m1(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2( @@ -30,7 +30,7 @@ void test_vlseg2e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, size_t vl) { - return vlseg2e64_v_f64m2(v0, v1, base, vl); + return __riscv_vlseg2e64_v_f64m2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4( @@ -43,7 +43,7 @@ void test_vlseg2e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, size_t vl) { - return vlseg2e64_v_f64m4(v0, v1, base, vl); + return __riscv_vlseg2e64_v_f64m4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1( @@ -56,7 +56,7 @@ void test_vlseg2e64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m1(v0, v1, base, vl); + return __riscv_vlseg2e64_v_i64m1(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2( @@ -69,7 +69,7 @@ void test_vlseg2e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m2(v0, v1, base, vl); + return __riscv_vlseg2e64_v_i64m2(v0, v1, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4( @@ -82,7 +82,7 @@ void test_vlseg2e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m4(v0, v1, base, vl); + return __riscv_vlseg2e64_v_i64m4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1( @@ -95,7 +95,7 @@ void test_vlseg2e64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m1(v0, v1, base, vl); + return __riscv_vlseg2e64_v_u64m1(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2( @@ -108,7 +108,7 @@ void test_vlseg2e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m2(v0, v1, base, vl); + return __riscv_vlseg2e64_v_u64m2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4( @@ -121,7 +121,7 @@ void test_vlseg2e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m4(v0, v1, base, vl); + return __riscv_vlseg2e64_v_u64m4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_m( @@ -134,7 +134,7 @@ void test_vlseg2e64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, size_t vl) { - return vlseg2e64_v_f64m1_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e64_v_f64m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m( @@ -147,7 +147,7 @@ void 
test_vlseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, size_t vl) { - return vlseg2e64_v_f64m2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e64_v_f64m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m( @@ -160,7 +160,7 @@ void test_vlseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, size_t vl) { - return vlseg2e64_v_f64m4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e64_v_f64m4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m( @@ -173,7 +173,7 @@ void test_vlseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m1_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e64_v_i64m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m( @@ -186,7 +186,7 @@ void test_vlseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, co // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e64_v_i64m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m( @@ -199,7 +199,7 @@ void test_vlseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, co // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e64_v_i64m4_m(v0, v1, mask, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m( @@ -212,7 +212,7 @@ void test_vlseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, co // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m1_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e64_v_u64m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m( @@ -225,7 +225,7 @@ void test_vlseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e64_v_u64m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m( @@ -238,6 +238,6 @@ void test_vlseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e64_v_u64m4_m(v0, v1, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c index 1b631b449699..900f14905c67 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e64ff.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m1(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m1(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg2e64ff_v_f64m2( @@ -34,7 +34,7 @@ void test_vlseg2e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double * // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4( @@ -49,7 +49,7 @@ void test_vlseg2e64ff_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double * // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1( @@ -64,7 +64,7 @@ void test_vlseg2e64ff_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double * // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m1(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m1(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2( @@ -79,7 +79,7 @@ void test_vlseg2e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *bas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4( @@ -94,7 +94,7 @@ void test_vlseg2e64ff_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *bas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m4(v0, v1, base, new_vl, vl); + return 
__riscv_vlseg2e64ff_v_i64m4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1( @@ -109,7 +109,7 @@ void test_vlseg2e64ff_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *bas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m1(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m1(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2( @@ -124,7 +124,7 @@ void test_vlseg2e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t * // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4( @@ -139,7 +139,7 @@ void test_vlseg2e64ff_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t * // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_m( @@ -154,7 +154,7 @@ void test_vlseg2e64ff_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t * // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m1_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m( @@ -169,7 +169,7 @@ void test_vlseg2e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m2_m(vfloat64m2_t *v0, 
vfloat64m2_t *v1, vbool32_t mask, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m( @@ -184,7 +184,7 @@ void test_vlseg2e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_m( @@ -199,7 +199,7 @@ void test_vlseg2e64ff_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m1_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_m( @@ -214,7 +214,7 @@ void test_vlseg2e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_m( @@ -229,7 +229,7 @@ void test_vlseg2e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m4_m(v0, v1, 
mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m( @@ -244,7 +244,7 @@ void test_vlseg2e64ff_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m1_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m( @@ -259,7 +259,7 @@ void test_vlseg2e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m( @@ -274,6 +274,6 @@ void test_vlseg2e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m4_m(v0, v1, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8.c index bcf86e991e6c..5e1d1c7de436 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8.c @@ -16,7 +16,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf8(v0, v1, 
base, vl); + return __riscv_vlseg2e8_v_i8mf8(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4( @@ -29,7 +29,7 @@ void test_vlseg2e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, s // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf4(v0, v1, base, vl); + return __riscv_vlseg2e8_v_i8mf4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2( @@ -42,7 +42,7 @@ void test_vlseg2e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, s // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf2(v0, v1, base, vl); + return __riscv_vlseg2e8_v_i8mf2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1( @@ -55,7 +55,7 @@ void test_vlseg2e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, s // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m1(v0, v1, base, vl); + return __riscv_vlseg2e8_v_i8m1(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2( @@ -68,7 +68,7 @@ void test_vlseg2e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, size // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m2(v0, v1, base, vl); + return __riscv_vlseg2e8_v_i8m2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4( @@ -81,7 +81,7 @@ void test_vlseg2e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, size // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m4(v0, v1, base, vl); + return __riscv_vlseg2e8_v_i8m4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8( @@ -94,7 +94,7 @@ void test_vlseg2e8_v_i8m4(vint8m4_t *v0, 
vint8m4_t *v1, const int8_t *base, size // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf8(v0, v1, base, vl); + return __riscv_vlseg2e8_v_u8mf8(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4( @@ -107,7 +107,7 @@ void test_vlseg2e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf4(v0, v1, base, vl); + return __riscv_vlseg2e8_v_u8mf4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2( @@ -120,7 +120,7 @@ void test_vlseg2e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf2(v0, v1, base, vl); + return __riscv_vlseg2e8_v_u8mf2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1( @@ -133,7 +133,7 @@ void test_vlseg2e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m1(v0, v1, base, vl); + return __riscv_vlseg2e8_v_u8m1(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2( @@ -146,7 +146,7 @@ void test_vlseg2e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, s // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m2(v0, v1, base, vl); + return __riscv_vlseg2e8_v_u8m2(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4( @@ -159,7 +159,7 @@ void test_vlseg2e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, s // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t 
*base, size_t vl) { - return vlseg2e8_v_u8m4(v0, v1, base, vl); + return __riscv_vlseg2e8_v_u8m4(v0, v1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_m( @@ -172,7 +172,7 @@ void test_vlseg2e8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, s // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf8_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e8_v_i8mf8_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_m( @@ -185,7 +185,7 @@ void test_vlseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, con // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e8_v_i8mf4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_m( @@ -198,7 +198,7 @@ void test_vlseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, con // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e8_v_i8mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_m( @@ -211,7 +211,7 @@ void test_vlseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, con // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m1_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e8_v_i8m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_m( @@ -224,7 +224,7 @@ void test_vlseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const i // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t 
*base, size_t vl) { - return vlseg2e8_v_i8m2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e8_v_i8m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_m( @@ -237,7 +237,7 @@ void test_vlseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const i // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e8_v_i8m4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m( @@ -250,7 +250,7 @@ void test_vlseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const i // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf8_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e8_v_u8mf8_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m( @@ -263,7 +263,7 @@ void test_vlseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e8_v_u8mf4_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m( @@ -276,7 +276,7 @@ void test_vlseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e8_v_u8mf2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_m( @@ -289,7 +289,7 @@ void test_vlseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, 
vbool8_t mask, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m1_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e8_v_u8m1_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m( @@ -302,7 +302,7 @@ void test_vlseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m2_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e8_v_u8m2_m(v0, v1, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m( @@ -315,6 +315,6 @@ void test_vlseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m4_m(v0, v1, mask, base, vl); + return __riscv_vlseg2e8_v_u8m4_m(v0, v1, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c index 2c3cb8fbd581..85ac077eef74 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e8ff.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf8(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf8(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4( @@ -34,7 +34,7 @@ void test_vlseg2e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { - return 
vlseg2e8ff_v_i8mf4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2( @@ -49,7 +49,7 @@ void test_vlseg2e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1( @@ -64,7 +64,7 @@ void test_vlseg2e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m1(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m1(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2( @@ -79,7 +79,7 @@ void test_vlseg2e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, si // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4( @@ -94,7 +94,7 @@ void test_vlseg2e8ff_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, si // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8( @@ -109,7 +109,7 @@ void test_vlseg2e8ff_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, si // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, size_t 
*new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf8(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf8(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4( @@ -124,7 +124,7 @@ void test_vlseg2e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2( @@ -139,7 +139,7 @@ void test_vlseg2e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1( @@ -154,7 +154,7 @@ void test_vlseg2e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *ba // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m1(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m1(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2( @@ -169,7 +169,7 @@ void test_vlseg2e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m2(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m2(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4( @@ -184,7 +184,7 @@ void test_vlseg2e8ff_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m4(vuint8m4_t 
*v0, vuint8m4_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m4(v0, v1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m4(v0, v1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_m( @@ -199,7 +199,7 @@ void test_vlseg2e8ff_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf8_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf8_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m( @@ -214,7 +214,7 @@ void test_vlseg2e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m( @@ -229,7 +229,7 @@ void test_vlseg2e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m( @@ -244,7 +244,7 @@ void test_vlseg2e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m1_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg2e8ff_v_i8m2_m( @@ -259,7 +259,7 @@ void test_vlseg2e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m( @@ -274,7 +274,7 @@ void test_vlseg2e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_m( @@ -289,7 +289,7 @@ void test_vlseg2e8ff_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf8_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf8_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_m( @@ -304,7 +304,7 @@ void test_vlseg2e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf4_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m( @@ -319,7 +319,7 @@ void test_vlseg2e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t 
*v1, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m( @@ -334,7 +334,7 @@ void test_vlseg2e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m1_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m1_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m( @@ -349,7 +349,7 @@ void test_vlseg2e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, con // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m2_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m2_m(v0, v1, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m( @@ -364,6 +364,6 @@ void test_vlseg2e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, con // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m4_m(v0, v1, mask, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m4_m(v0, v1, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16.c index 7cf6edc1b7ce..674b871cc683 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16.c @@ -19,7 +19,7 @@ // 
CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf4(v0, v1, v2, base, vl); + return __riscv_vlseg3e16_v_f16mf4(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2( @@ -34,7 +34,7 @@ void test_vlseg3e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf2(v0, v1, v2, base, vl); + return __riscv_vlseg3e16_v_f16mf2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1( @@ -49,7 +49,7 @@ void test_vlseg3e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m1(v0, v1, v2, base, vl); + return __riscv_vlseg3e16_v_f16m1(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2( @@ -64,7 +64,7 @@ void test_vlseg3e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m2(v0, v1, v2, base, vl); + return __riscv_vlseg3e16_v_f16m2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4( @@ -79,7 +79,7 @@ void test_vlseg3e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf4(v0, v1, v2, base, vl); + return __riscv_vlseg3e16_v_i16mf4(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2( @@ -94,7 +94,7 @@ void test_vlseg3e16_v_i16mf4(vint16mf4_t *v0, 
vint16mf4_t *v1, vint16mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf2(v0, v1, v2, base, vl); + return __riscv_vlseg3e16_v_i16mf2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1( @@ -109,7 +109,7 @@ void test_vlseg3e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m1(v0, v1, v2, base, vl); + return __riscv_vlseg3e16_v_i16m1(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2( @@ -124,7 +124,7 @@ void test_vlseg3e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, cons // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m2(v0, v1, v2, base, vl); + return __riscv_vlseg3e16_v_i16m2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4( @@ -139,7 +139,7 @@ void test_vlseg3e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, cons // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf4(v0, v1, v2, base, vl); + return __riscv_vlseg3e16_v_u16mf4(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2( @@ -154,7 +154,7 @@ void test_vlseg3e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf2(v0, v1, v2, base, vl); + return __riscv_vlseg3e16_v_u16mf2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1( @@ -169,7 +169,7 @@ void 
test_vlseg3e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m1(v0, v1, v2, base, vl); + return __riscv_vlseg3e16_v_u16m1(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2( @@ -184,7 +184,7 @@ void test_vlseg3e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m2(v0, v1, v2, base, vl); + return __riscv_vlseg3e16_v_u16m2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_m( @@ -199,7 +199,7 @@ void test_vlseg3e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf4_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e16_v_f16mf4_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_m( @@ -214,7 +214,7 @@ void test_vlseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e16_v_f16mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_m( @@ -229,7 +229,7 @@ void test_vlseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m1_m(v0, v1, v2, mask, 
base, vl); + return __riscv_vlseg3e16_v_f16m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_m( @@ -244,7 +244,7 @@ void test_vlseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e16_v_f16m2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_m( @@ -259,7 +259,7 @@ void test_vlseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf4_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e16_v_i16mf4_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_m( @@ -274,7 +274,7 @@ void test_vlseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e16_v_i16mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_m( @@ -289,7 +289,7 @@ void test_vlseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m1_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e16_v_i16m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_m( @@ -304,7 +304,7 @@ void test_vlseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vb // 
CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e16_v_i16m2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m( @@ -319,7 +319,7 @@ void test_vlseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf4_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e16_v_u16mf4_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m( @@ -334,7 +334,7 @@ void test_vlseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e16_v_u16mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m( @@ -349,7 +349,7 @@ void test_vlseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m1_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e16_v_u16m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m( @@ -364,6 +364,6 @@ void test_vlseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m2_m(v0, v1, v2, mask, base, vl); + return 
__riscv_vlseg3e16_v_u16m2_m(v0, v1, v2, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c index 55860b436190..5929f8a1e803 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e16ff.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf4(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16mf4(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2( @@ -38,7 +38,7 @@ void test_vlseg3e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16mf2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1( @@ -55,7 +55,7 @@ void test_vlseg3e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m1(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16m1(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2( @@ -72,7 +72,7 @@ void test_vlseg3e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 
*base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16m2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4( @@ -89,7 +89,7 @@ void test_vlseg3e16ff_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf4(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16mf4(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2( @@ -106,7 +106,7 @@ void test_vlseg3e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16mf2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1( @@ -123,7 +123,7 @@ void test_vlseg3e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m1(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16m1(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2( @@ -140,7 +140,7 @@ void test_vlseg3e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16m2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4( @@ -157,7 +157,7 
@@ void test_vlseg3e16ff_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf4(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16mf4(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2( @@ -174,7 +174,7 @@ void test_vlseg3e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16mf2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1( @@ -191,7 +191,7 @@ void test_vlseg3e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m1(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16m1(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2( @@ -208,7 +208,7 @@ void test_vlseg3e16ff_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16m2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_m( @@ -225,7 +225,7 @@ void test_vlseg3e16ff_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, 
vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf4_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16mf4_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_m( @@ -242,7 +242,7 @@ void test_vlseg3e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_m( @@ -259,7 +259,7 @@ void test_vlseg3e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m1_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_m( @@ -276,7 +276,7 @@ void test_vlseg3e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m( @@ -293,7 +293,7 @@ void test_vlseg3e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) 
{ - return vlseg3e16ff_v_i16mf4_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16mf4_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m( @@ -310,7 +310,7 @@ void test_vlseg3e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m( @@ -327,7 +327,7 @@ void test_vlseg3e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m1_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_m( @@ -344,7 +344,7 @@ void test_vlseg3e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m( @@ -361,7 +361,7 @@ void test_vlseg3e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf4_m(v0, v1, v2, mask, base, new_vl, vl); + return 
__riscv_vlseg3e16ff_v_u16mf4_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m( @@ -378,7 +378,7 @@ void test_vlseg3e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m( @@ -395,7 +395,7 @@ void test_vlseg3e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m1_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m( @@ -412,6 +412,6 @@ void test_vlseg3e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16m2_m(v0, v1, v2, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32.c index 173ce591da28..86af949b7780 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // 
void test_vlseg3e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, size_t vl) { - return vlseg3e32_v_f32mf2(v0, v1, v2, base, vl); + return __riscv_vlseg3e32_v_f32mf2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1( @@ -34,7 +34,7 @@ void test_vlseg3e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, size_t vl) { - return vlseg3e32_v_f32m1(v0, v1, v2, base, vl); + return __riscv_vlseg3e32_v_f32m1(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2( @@ -49,7 +49,7 @@ void test_vlseg3e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, size_t vl) { - return vlseg3e32_v_f32m2(v0, v1, v2, base, vl); + return __riscv_vlseg3e32_v_f32m2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2( @@ -64,7 +64,7 @@ void test_vlseg3e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32mf2(v0, v1, v2, base, vl); + return __riscv_vlseg3e32_v_i32mf2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1( @@ -79,7 +79,7 @@ void test_vlseg3e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m1(v0, v1, v2, base, vl); + return __riscv_vlseg3e32_v_i32m1(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2( @@ -94,7 +94,7 @@ void test_vlseg3e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, cons // CHECK-RV64-NEXT: 
ret void // void test_vlseg3e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m2(v0, v1, v2, base, vl); + return __riscv_vlseg3e32_v_i32m2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2( @@ -109,7 +109,7 @@ void test_vlseg3e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, cons // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32mf2(v0, v1, v2, base, vl); + return __riscv_vlseg3e32_v_u32mf2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1( @@ -124,7 +124,7 @@ void test_vlseg3e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m1(v0, v1, v2, base, vl); + return __riscv_vlseg3e32_v_u32m1(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2( @@ -139,7 +139,7 @@ void test_vlseg3e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m2(v0, v1, v2, base, vl); + return __riscv_vlseg3e32_v_u32m2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_m( @@ -154,7 +154,7 @@ void test_vlseg3e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, size_t vl) { - return vlseg3e32_v_f32mf2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e32_v_f32mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m( @@ -169,7 +169,7 @@ void 
test_vlseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, size_t vl) { - return vlseg3e32_v_f32m1_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e32_v_f32m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m( @@ -184,7 +184,7 @@ void test_vlseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, size_t vl) { - return vlseg3e32_v_f32m2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e32_v_f32m2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m( @@ -199,7 +199,7 @@ void test_vlseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32mf2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e32_v_i32mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m( @@ -214,7 +214,7 @@ void test_vlseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m1_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e32_v_i32m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m( @@ -229,7 +229,7 @@ void test_vlseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, size_t vl) { - return 
vlseg3e32_v_i32m2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e32_v_i32m2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m( @@ -244,7 +244,7 @@ void test_vlseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32mf2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e32_v_u32mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m( @@ -259,7 +259,7 @@ void test_vlseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m1_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e32_v_u32m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m( @@ -274,6 +274,6 @@ void test_vlseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e32_v_u32m2_m(v0, v1, v2, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c index 636061628915..87145bb83ca3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e32ff.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t 
*v1, vfloat32mf2_t *v2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32mf2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32mf2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1( @@ -38,7 +38,7 @@ void test_vlseg3e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m1(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32m1(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2( @@ -55,7 +55,7 @@ void test_vlseg3e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32m2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2( @@ -72,7 +72,7 @@ void test_vlseg3e32ff_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32mf2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32mf2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1( @@ -89,7 +89,7 @@ void test_vlseg3e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m1(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32m1(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg3e32ff_v_i32m2( @@ -106,7 +106,7 @@ void test_vlseg3e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32m2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2( @@ -123,7 +123,7 @@ void test_vlseg3e32ff_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32mf2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32mf2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1( @@ -140,7 +140,7 @@ void test_vlseg3e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m1(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32m1(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2( @@ -157,7 +157,7 @@ void test_vlseg3e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32m2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_m( @@ -174,7 +174,7 @@ void test_vlseg3e32ff_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32mf2_m(vfloat32mf2_t *v0, 
vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32mf2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m( @@ -191,7 +191,7 @@ void test_vlseg3e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m1_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m( @@ -208,7 +208,7 @@ void test_vlseg3e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_m( @@ -225,7 +225,7 @@ void test_vlseg3e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32mf2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_m( @@ -242,7 +242,7 @@ void test_vlseg3e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, size_t 
*new_vl, size_t vl) { - return vlseg3e32ff_v_i32m1_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_m( @@ -259,7 +259,7 @@ void test_vlseg3e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m( @@ -276,7 +276,7 @@ void test_vlseg3e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32mf2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m( @@ -293,7 +293,7 @@ void test_vlseg3e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m1_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m( @@ -310,6 +310,6 @@ void test_vlseg3e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m2_m(v0, v1, v2, mask, base, 
new_vl, vl); + return __riscv_vlseg3e32ff_v_u32m2_m(v0, v1, v2, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64.c index 6dc332215995..27cb455f45a1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, size_t vl) { - return vlseg3e64_v_f64m1(v0, v1, v2, base, vl); + return __riscv_vlseg3e64_v_f64m1(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2( @@ -34,7 +34,7 @@ void test_vlseg3e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, size_t vl) { - return vlseg3e64_v_f64m2(v0, v1, v2, base, vl); + return __riscv_vlseg3e64_v_f64m2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1( @@ -49,7 +49,7 @@ void test_vlseg3e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m1(v0, v1, v2, base, vl); + return __riscv_vlseg3e64_v_i64m1(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2( @@ -64,7 +64,7 @@ void test_vlseg3e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, cons // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m2(v0, v1, v2, base, vl); + return __riscv_vlseg3e64_v_i64m2(v0, v1, v2, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1( @@ -79,7 +79,7 @@ void test_vlseg3e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, cons // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m1(v0, v1, v2, base, vl); + return __riscv_vlseg3e64_v_u64m1(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2( @@ -94,7 +94,7 @@ void test_vlseg3e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m2(v0, v1, v2, base, vl); + return __riscv_vlseg3e64_v_u64m2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_m( @@ -109,7 +109,7 @@ void test_vlseg3e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, size_t vl) { - return vlseg3e64_v_f64m1_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e64_v_f64m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m( @@ -124,7 +124,7 @@ void test_vlseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, size_t vl) { - return vlseg3e64_v_f64m2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e64_v_f64m2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m( @@ -139,7 +139,7 @@ void test_vlseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, size_t vl) { - return 
vlseg3e64_v_i64m1_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e64_v_i64m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m( @@ -154,7 +154,7 @@ void test_vlseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e64_v_i64m2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m( @@ -169,7 +169,7 @@ void test_vlseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m1_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e64_v_u64m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m( @@ -184,6 +184,6 @@ void test_vlseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e64_v_u64m2_m(v0, v1, v2, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c index a537878d2377..7b170b1f8ea7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e64ff.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, 
vfloat64m1_t *v2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m1(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_f64m1(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2( @@ -38,7 +38,7 @@ void test_vlseg3e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_f64m2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1( @@ -55,7 +55,7 @@ void test_vlseg3e64ff_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m1(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_i64m1(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2( @@ -72,7 +72,7 @@ void test_vlseg3e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_i64m2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1( @@ -89,7 +89,7 @@ void test_vlseg3e64ff_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m1(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_u64m1(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg3e64ff_v_u64m2( @@ -106,7 +106,7 @@ void test_vlseg3e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_u64m2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_m( @@ -123,7 +123,7 @@ void test_vlseg3e64ff_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m1_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_f64m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m( @@ -140,7 +140,7 @@ void test_vlseg3e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_f64m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_m( @@ -157,7 +157,7 @@ void test_vlseg3e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m1_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_i64m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_m( @@ -174,7 +174,7 @@ void test_vlseg3e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, 
vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_i64m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m( @@ -191,7 +191,7 @@ void test_vlseg3e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m1_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_u64m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_m( @@ -208,6 +208,6 @@ void test_vlseg3e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_u64m2_m(v0, v1, v2, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8.c index 312a869fd4d1..9b28c4a4ccd8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8.c @@ -18,7 +18,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf8(v0, v1, v2, base, vl); + return __riscv_vlseg3e8_v_i8mf8(v0, v1, v2, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4( @@ -33,7 +33,7 @@ void test_vlseg3e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf4(v0, v1, v2, base, vl); + return __riscv_vlseg3e8_v_i8mf4(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2( @@ -48,7 +48,7 @@ void test_vlseg3e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf2(v0, v1, v2, base, vl); + return __riscv_vlseg3e8_v_i8mf2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1( @@ -63,7 +63,7 @@ void test_vlseg3e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m1(v0, v1, v2, base, vl); + return __riscv_vlseg3e8_v_i8m1(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2( @@ -78,7 +78,7 @@ void test_vlseg3e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m2(v0, v1, v2, base, vl); + return __riscv_vlseg3e8_v_i8m2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8( @@ -93,7 +93,7 @@ void test_vlseg3e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf8(v0, v1, v2, base, vl); + return __riscv_vlseg3e8_v_u8mf8(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4( @@ 
-108,7 +108,7 @@ void test_vlseg3e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf4(v0, v1, v2, base, vl); + return __riscv_vlseg3e8_v_u8mf4(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2( @@ -123,7 +123,7 @@ void test_vlseg3e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf2(v0, v1, v2, base, vl); + return __riscv_vlseg3e8_v_u8mf2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1( @@ -138,7 +138,7 @@ void test_vlseg3e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m1(v0, v1, v2, base, vl); + return __riscv_vlseg3e8_v_u8m1(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2( @@ -153,7 +153,7 @@ void test_vlseg3e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m2(v0, v1, v2, base, vl); + return __riscv_vlseg3e8_v_u8m2(v0, v1, v2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_m( @@ -168,7 +168,7 @@ void test_vlseg3e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf8_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e8_v_i8mf8_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: 
@test_vlseg3e8_v_i8mf4_m( @@ -183,7 +183,7 @@ void test_vlseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf4_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e8_v_i8mf4_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_m( @@ -198,7 +198,7 @@ void test_vlseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e8_v_i8mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_m( @@ -213,7 +213,7 @@ void test_vlseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m1_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e8_v_i8m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_m( @@ -228,7 +228,7 @@ void test_vlseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e8_v_i8m2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m( @@ -243,7 +243,7 @@ void test_vlseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, size_t vl) { - 
return vlseg3e8_v_u8mf8_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e8_v_u8mf8_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m( @@ -258,7 +258,7 @@ void test_vlseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf4_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e8_v_u8mf4_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m( @@ -273,7 +273,7 @@ void test_vlseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e8_v_u8mf2_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m( @@ -288,7 +288,7 @@ void test_vlseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m1_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e8_v_u8m1_m(v0, v1, v2, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m( @@ -303,6 +303,6 @@ void test_vlseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vboo // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m2_m(v0, v1, v2, mask, base, vl); + return __riscv_vlseg3e8_v_u8m2_m(v0, v1, v2, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c index 9251167cf23e..696d072e518d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg3e8ff.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf8(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf8(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4( @@ -38,7 +38,7 @@ void test_vlseg3e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, con // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf4(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf4(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2( @@ -55,7 +55,7 @@ void test_vlseg3e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, con // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1( @@ -72,7 +72,7 @@ void test_vlseg3e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, con // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m1(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8m1(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2( @@ -89,7 +89,7 @@ void 
test_vlseg3e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const i // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8m2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8( @@ -106,7 +106,7 @@ void test_vlseg3e8ff_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const i // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf8(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf8(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4( @@ -123,7 +123,7 @@ void test_vlseg3e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf4(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf4(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2( @@ -140,7 +140,7 @@ void test_vlseg3e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1( @@ -157,7 +157,7 @@ void test_vlseg3e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { - return 
vlseg3e8ff_v_u8m1(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8m1(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2( @@ -174,7 +174,7 @@ void test_vlseg3e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, cons // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m2(v0, v1, v2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8m2(v0, v1, v2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_m( @@ -191,7 +191,7 @@ void test_vlseg3e8ff_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, cons // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf8_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf8_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m( @@ -208,7 +208,7 @@ void test_vlseg3e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf4_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf4_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m( @@ -225,7 +225,7 @@ void test_vlseg3e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg3e8ff_v_i8m1_m( @@ -242,7 +242,7 @@ void test_vlseg3e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m1_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m( @@ -259,7 +259,7 @@ void test_vlseg3e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8m2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_m( @@ -276,7 +276,7 @@ void test_vlseg3e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf8_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf8_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m( @@ -293,7 +293,7 @@ void test_vlseg3e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf4_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf4_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_m( @@ -310,7 +310,7 @@ void test_vlseg3e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t 
*v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf2_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m( @@ -327,7 +327,7 @@ void test_vlseg3e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m1_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8m1_m(v0, v1, v2, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m( @@ -344,6 +344,6 @@ void test_vlseg3e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m2_m(v0, v1, v2, mask, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8m2_m(v0, v1, v2, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16.c index 872686b60888..0cb953134332 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf4(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e16_v_f16mf4(v0, v1, v2, v3, base, 
vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2( @@ -38,7 +38,7 @@ void test_vlseg4e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e16_v_f16mf2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1( @@ -55,7 +55,7 @@ void test_vlseg4e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m1(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e16_v_f16m1(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2( @@ -72,7 +72,7 @@ void test_vlseg4e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e16_v_f16m2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4( @@ -89,7 +89,7 @@ void test_vlseg4e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf4(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e16_v_i16mf4(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2( @@ -106,7 +106,7 @@ void test_vlseg4e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, 
vint16mf2_t *v3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e16_v_i16mf2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1( @@ -123,7 +123,7 @@ void test_vlseg4e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m1(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e16_v_i16m1(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2( @@ -140,7 +140,7 @@ void test_vlseg4e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e16_v_i16m2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4( @@ -157,7 +157,7 @@ void test_vlseg4e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf4(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e16_v_u16mf4(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2( @@ -174,7 +174,7 @@ void test_vlseg4e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e16_v_u16mf2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1( @@ -191,7 +191,7 @@ void test_vlseg4e16_v_u16mf2(vuint16mf2_t 
*v0, vuint16mf2_t *v1, vuint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m1(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e16_v_u16m1(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2( @@ -208,7 +208,7 @@ void test_vlseg4e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e16_v_u16m2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_m( @@ -225,7 +225,7 @@ void test_vlseg4e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_m( @@ -242,7 +242,7 @@ void test_vlseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_m( @@ -259,7 +259,7 @@ void test_vlseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, 
const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_m( @@ -276,7 +276,7 @@ void test_vlseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_m( @@ -293,7 +293,7 @@ void test_vlseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_m( @@ -310,7 +310,7 @@ void test_vlseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_m( @@ -327,7 +327,7 @@ void test_vlseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e16_v_i16m1_m(v0, 
v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_m( @@ -344,7 +344,7 @@ void test_vlseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m( @@ -361,7 +361,7 @@ void test_vlseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m( @@ -378,7 +378,7 @@ void test_vlseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m( @@ -395,7 +395,7 @@ void test_vlseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m( @@ -412,6 +412,6 @@ void 
test_vlseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c index c71230a25e1b..e4be39213c9a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e16ff.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf4(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16mf4(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2( @@ -42,7 +42,7 @@ void test_vlseg4e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16mf2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1( @@ -61,7 +61,7 @@ void test_vlseg4e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { - return 
vlseg4e16ff_v_f16m1(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16m1(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2( @@ -80,7 +80,7 @@ void test_vlseg4e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16m2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4( @@ -99,7 +99,7 @@ void test_vlseg4e16ff_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf4(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16mf4(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2( @@ -118,7 +118,7 @@ void test_vlseg4e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16mf2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1( @@ -137,7 +137,7 @@ void test_vlseg4e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m1(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16m1(v0, v1, v2, v3, base, new_vl, vl); 
} // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2( @@ -156,7 +156,7 @@ void test_vlseg4e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16m2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4( @@ -175,7 +175,7 @@ void test_vlseg4e16ff_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf4(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16mf4(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2( @@ -194,7 +194,7 @@ void test_vlseg4e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16mf2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1( @@ -213,7 +213,7 @@ void test_vlseg4e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m1(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16m1(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2( @@ -232,7 +232,7 @@ void test_vlseg4e16ff_v_u16m1(vuint16m1_t *v0, 
vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16m2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_m( @@ -251,7 +251,7 @@ void test_vlseg4e16ff_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_m( @@ -270,7 +270,7 @@ void test_vlseg4e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_m( @@ -289,7 +289,7 @@ void test_vlseg4e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_m( @@ -308,7 +308,7 @@ void 
test_vlseg4e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m( @@ -327,7 +327,7 @@ void test_vlseg4e16ff_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vlseg4e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m( @@ -365,7 +365,7 @@ void test_vlseg4e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_m( @@ 
-384,7 +384,7 @@ void test_vlseg4e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m( @@ -403,7 +403,7 @@ void test_vlseg4e16ff_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m( @@ -422,7 +422,7 @@ void test_vlseg4e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m( @@ -441,7 +441,7 @@ void test_vlseg4e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg4e16ff_v_u16m2_m( @@ -460,6 +460,6 @@ void test_vlseg4e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32.c index 0e9c30c8e4f6..deb45c1aed08 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, size_t vl) { - return vlseg4e32_v_f32mf2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e32_v_f32mf2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1( @@ -38,7 +38,7 @@ void test_vlseg4e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, size_t vl) { - return vlseg4e32_v_f32m1(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e32_v_f32m1(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2( @@ -55,7 +55,7 @@ void test_vlseg4e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, size_t vl) { - return vlseg4e32_v_f32m2(v0, v1, v2, 
v3, base, vl); + return __riscv_vlseg4e32_v_f32m2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2( @@ -72,7 +72,7 @@ void test_vlseg4e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32mf2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e32_v_i32mf2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1( @@ -89,7 +89,7 @@ void test_vlseg4e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m1(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e32_v_i32m1(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2( @@ -106,7 +106,7 @@ void test_vlseg4e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e32_v_i32m2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2( @@ -123,7 +123,7 @@ void test_vlseg4e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32mf2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e32_v_u32mf2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1( @@ -140,7 +140,7 @@ void test_vlseg4e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v // CHECK-RV64-NEXT: ret void // void 
test_vlseg4e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m1(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e32_v_u32m1(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2( @@ -157,7 +157,7 @@ void test_vlseg4e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e32_v_u32m2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_m( @@ -174,7 +174,7 @@ void test_vlseg4e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, size_t vl) { - return vlseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m( @@ -191,7 +191,7 @@ void test_vlseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, size_t vl) { - return vlseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m( @@ -208,7 +208,7 @@ void test_vlseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, size_t vl) { - return vlseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, base, 
vl); + return __riscv_vlseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m( @@ -225,7 +225,7 @@ void test_vlseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m( @@ -242,7 +242,7 @@ void test_vlseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m( @@ -259,7 +259,7 @@ void test_vlseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m( @@ -276,7 +276,7 @@ void test_vlseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m( @@ -293,7 +293,7 
@@ void test_vlseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m( @@ -310,6 +310,6 @@ void test_vlseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c index c085b9130bf4..6b4c323857aa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e32ff.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32mf2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32mf2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1( @@ -42,7 +42,7 @@ void test_vlseg4e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, size_t *new_vl, size_t vl) { - return 
vlseg4e32ff_v_f32m1(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32m1(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2( @@ -61,7 +61,7 @@ void test_vlseg4e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32m2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2( @@ -80,7 +80,7 @@ void test_vlseg4e32ff_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32mf2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32mf2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1( @@ -99,7 +99,7 @@ void test_vlseg4e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m1(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32m1(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2( @@ -118,7 +118,7 @@ void test_vlseg4e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32m2(v0, v1, v2, v3, base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2( @@ -137,7 +137,7 @@ void test_vlseg4e32ff_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32mf2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32mf2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1( @@ -156,7 +156,7 @@ void test_vlseg4e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m1(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32m1(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2( @@ -175,7 +175,7 @@ void test_vlseg4e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32m2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m( @@ -194,7 +194,7 @@ void test_vlseg4e32ff_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m( @@ -213,7 +213,7 @@ void 
test_vlseg4e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m( @@ -232,7 +232,7 @@ void test_vlseg4e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_m( @@ -251,7 +251,7 @@ void test_vlseg4e32ff_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_m( @@ -270,7 +270,7 @@ void test_vlseg4e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_m( @@ 
-289,7 +289,7 @@ void test_vlseg4e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m( @@ -308,7 +308,7 @@ void test_vlseg4e32ff_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m( @@ -327,7 +327,7 @@ void test_vlseg4e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m( @@ -346,6 +346,6 @@ void test_vlseg4e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64.c index 7a3b26b5f8e9..7253c2eec585 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, size_t vl) { - return vlseg4e64_v_f64m1(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e64_v_f64m1(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2( @@ -38,7 +38,7 @@ void test_vlseg4e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, size_t vl) { - return vlseg4e64_v_f64m2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e64_v_f64m2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1( @@ -55,7 +55,7 @@ void test_vlseg4e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m1(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e64_v_i64m1(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2( @@ -72,7 +72,7 @@ void test_vlseg4e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e64_v_i64m2(v0, v1, v2, v3, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1( @@ -89,7 +89,7 @@ void test_vlseg4e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m1(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e64_v_u64m1(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2( @@ -106,7 +106,7 @@ void test_vlseg4e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e64_v_u64m2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_m( @@ -123,7 +123,7 @@ void test_vlseg4e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, size_t vl) { - return vlseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m( @@ -140,7 +140,7 @@ void test_vlseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, size_t vl) { - return vlseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m( @@ -157,7 +157,7 @@ void test_vlseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t * // CHECK-RV64-NEXT: ret void // void 
test_vlseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m( @@ -174,7 +174,7 @@ void test_vlseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m( @@ -191,7 +191,7 @@ void test_vlseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m( @@ -208,6 +208,6 @@ void test_vlseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c index b18ea64ecaae..92e28f2a9674 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e64ff.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m1(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_f64m1(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2( @@ -42,7 +42,7 @@ void test_vlseg4e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_f64m2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1( @@ -61,7 +61,7 @@ void test_vlseg4e64ff_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m1(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_i64m1(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2( @@ -80,7 +80,7 @@ void test_vlseg4e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_i64m2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1( @@ -99,7 +99,7 @@ void 
test_vlseg4e64ff_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m1(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_u64m1(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2( @@ -118,7 +118,7 @@ void test_vlseg4e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_u64m2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_m( @@ -137,7 +137,7 @@ void test_vlseg4e64ff_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_f64m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m( @@ -156,7 +156,7 @@ void test_vlseg4e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_f64m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_m( @@ -175,7 +175,7 @@ void test_vlseg4e64ff_v_f64m2_m(vfloat64m2_t *v0, 
vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_i64m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_m( @@ -194,7 +194,7 @@ void test_vlseg4e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_i64m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m( @@ -213,7 +213,7 @@ void test_vlseg4e64ff_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_u64m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m( @@ -232,6 +232,6 @@ void test_vlseg4e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_u64m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8.c index 1855b3f3a904..7300012a945e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8.c @@ -20,7 +20,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf8(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e8_v_i8mf8(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4( @@ -37,7 +37,7 @@ void test_vlseg4e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf4(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e8_v_i8mf4(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2( @@ -54,7 +54,7 @@ void test_vlseg4e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e8_v_i8mf2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1( @@ -71,7 +71,7 @@ void test_vlseg4e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m1(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e8_v_i8m1(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2( @@ -88,7 +88,7 @@ void test_vlseg4e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t 
// CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e8_v_i8m2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8( @@ -105,7 +105,7 @@ void test_vlseg4e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf8(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e8_v_u8mf8(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4( @@ -122,7 +122,7 @@ void test_vlseg4e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf4(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e8_v_u8mf4(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2( @@ -139,7 +139,7 @@ void test_vlseg4e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e8_v_u8mf2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1( @@ -156,7 +156,7 @@ void test_vlseg4e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m1(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e8_v_u8m1(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2( @@ 
-173,7 +173,7 @@ void test_vlseg4e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m2(v0, v1, v2, v3, base, vl); + return __riscv_vlseg4e8_v_u8m2(v0, v1, v2, v3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_m( @@ -190,7 +190,7 @@ void test_vlseg4e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_m( @@ -207,7 +207,7 @@ void test_vlseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_m( @@ -224,7 +224,7 @@ void test_vlseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_m( @@ -241,7 +241,7 @@ void test_vlseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, 
vbool8_t mask, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_m( @@ -258,7 +258,7 @@ void test_vlseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m( @@ -275,7 +275,7 @@ void test_vlseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m( @@ -292,7 +292,7 @@ void test_vlseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m( @@ -309,7 +309,7 @@ void test_vlseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, base, vl); } 
// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m( @@ -326,7 +326,7 @@ void test_vlseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m( @@ -343,6 +343,6 @@ void test_vlseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuin // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, base, vl); + return __riscv_vlseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c index ce3227920ea9..0f50dad828fa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg4e8ff.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf8(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf8(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4( @@ -42,7 +42,7 @@ void test_vlseg4e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { - return 
vlseg4e8ff_v_i8mf4(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf4(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2( @@ -61,7 +61,7 @@ void test_vlseg4e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1( @@ -80,7 +80,7 @@ void test_vlseg4e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m1(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8m1(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2( @@ -99,7 +99,7 @@ void test_vlseg4e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8m2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8( @@ -118,7 +118,7 @@ void test_vlseg4e8ff_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf8(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf8(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4( @@ -137,7 
+137,7 @@ void test_vlseg4e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf4(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf4(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2( @@ -156,7 +156,7 @@ void test_vlseg4e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1( @@ -175,7 +175,7 @@ void test_vlseg4e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m1(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8m1(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2( @@ -194,7 +194,7 @@ void test_vlseg4e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuin // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m2(v0, v1, v2, v3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8m2(v0, v1, v2, v3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_m( @@ -213,7 +213,7 @@ void test_vlseg4e8ff_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuin // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf8_m(vint8mf8_t 
*v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf8_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf8_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m( @@ -232,7 +232,7 @@ void test_vlseg4e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m( @@ -251,7 +251,7 @@ void test_vlseg4e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_m( @@ -270,7 +270,7 @@ void test_vlseg4e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m( @@ -289,7 +289,7 @@ void test_vlseg4e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, 
vint8m2_t *v3, vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m( @@ -308,7 +308,7 @@ void test_vlseg4e8ff_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf8_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf8_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m( @@ -327,7 +327,7 @@ void test_vlseg4e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf4_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m( @@ -346,7 +346,7 @@ void test_vlseg4e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m( @@ -365,7 +365,7 @@ void test_vlseg4e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t 
*v3, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8m1_m(v0, v1, v2, v3, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m( @@ -384,6 +384,6 @@ void test_vlseg4e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8m2_m(v0, v1, v2, v3, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16.c index a6f1d4d4e1c9..01764f94ed11 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2( @@ -42,7 +42,7 @@ void test_vlseg5e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1( @@ 
-61,7 +61,7 @@ void test_vlseg5e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4( @@ -80,7 +80,7 @@ void test_vlseg5e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf4(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e16_v_i16mf4(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2( @@ -99,7 +99,7 @@ void test_vlseg5e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf2(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e16_v_i16mf2(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1( @@ -118,7 +118,7 @@ void test_vlseg5e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16m1(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e16_v_i16m1(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4( @@ -137,7 +137,7 @@ void test_vlseg5e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t 
*v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf4(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e16_v_u16mf4(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2( @@ -156,7 +156,7 @@ void test_vlseg5e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf2(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e16_v_u16mf2(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1( @@ -175,7 +175,7 @@ void test_vlseg5e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16m1(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e16_v_u16m1(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_m( @@ -194,7 +194,7 @@ void test_vlseg5e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_m( @@ -213,7 +213,7 @@ void test_vlseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, size_t vl) { 
- return vlseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_m( @@ -232,7 +232,7 @@ void test_vlseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_m( @@ -251,7 +251,7 @@ void test_vlseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_m( @@ -270,7 +270,7 @@ void test_vlseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_m( @@ -289,7 +289,7 @@ void test_vlseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16m1_m(v0, v1, 
v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m( @@ -308,7 +308,7 @@ void test_vlseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m( @@ -327,7 +327,7 @@ void test_vlseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m( @@ -346,6 +346,6 @@ void test_vlseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c index 64f16289b21a..c4e1a2cc6d49 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e16ff.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2( @@ -46,7 +46,7 @@ void test_vlseg5e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1( @@ -67,7 +67,7 @@ void test_vlseg5e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16m1(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16m1(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4( @@ -88,7 +88,7 @@ void test_vlseg5e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg5e16ff_v_i16mf2( @@ -109,7 +109,7 @@ void test_vlseg5e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1( @@ -130,7 +130,7 @@ void test_vlseg5e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16m1(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16m1(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4( @@ -151,7 +151,7 @@ void test_vlseg5e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2( @@ -172,7 +172,7 @@ void test_vlseg5e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg5e16ff_v_u16m1( @@ -193,7 +193,7 @@ void test_vlseg5e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16m1(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16m1(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_m( @@ -214,7 +214,7 @@ void test_vlseg5e16ff_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_m( @@ -235,7 +235,7 @@ void test_vlseg5e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_m( @@ -256,7 +256,7 @@ void test_vlseg5e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, 
vl); + return __riscv_vlseg5e16ff_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m( @@ -277,7 +277,7 @@ void test_vlseg5e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m( @@ -298,7 +298,7 @@ void test_vlseg5e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_m( @@ -319,7 +319,7 @@ void test_vlseg5e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m( @@ -340,7 +340,7 @@ void test_vlseg5e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, 
const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m( @@ -361,7 +361,7 @@ void test_vlseg5e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m( @@ -382,6 +382,6 @@ void test_vlseg5e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32.c index 51bfca243262..996a75996bfc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, size_t vl) { - return vlseg5e32_v_f32mf2(v0, v1, v2, v3, v4, base, vl); + return 
__riscv_vlseg5e32_v_f32mf2(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1( @@ -42,7 +42,7 @@ void test_vlseg5e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, size_t vl) { - return vlseg5e32_v_f32m1(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e32_v_f32m1(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2( @@ -61,7 +61,7 @@ void test_vlseg5e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32mf2(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e32_v_i32mf2(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1( @@ -80,7 +80,7 @@ void test_vlseg5e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32m1(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e32_v_i32m1(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2( @@ -99,7 +99,7 @@ void test_vlseg5e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32mf2(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e32_v_u32mf2(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1( @@ -118,7 +118,7 @@ void test_vlseg5e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t 
*v1, vuint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32m1(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e32_v_u32m1(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_m( @@ -137,7 +137,7 @@ void test_vlseg5e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, size_t vl) { - return vlseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m( @@ -156,7 +156,7 @@ void test_vlseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, size_t vl) { - return vlseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m( @@ -175,7 +175,7 @@ void test_vlseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m( @@ -194,7 +194,7 @@ void test_vlseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vlseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m( @@ -213,7 +213,7 @@ void test_vlseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m( @@ -232,6 +232,6 @@ void test_vlseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c index 4918595da73d..5d0fada9413c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e32ff.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32mf2(v0, v1, v2, v3, v4, 
base, new_vl, vl); + return __riscv_vlseg5e32ff_v_f32mf2(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1( @@ -46,7 +46,7 @@ void test_vlseg5e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32m1(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_f32m1(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2( @@ -67,7 +67,7 @@ void test_vlseg5e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32mf2(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_i32mf2(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1( @@ -88,7 +88,7 @@ void test_vlseg5e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32m1(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_i32m1(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2( @@ -109,7 +109,7 @@ void test_vlseg5e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32mf2(v0, v1, v2, v3, v4, base, new_vl, vl); + 
return __riscv_vlseg5e32ff_v_u32mf2(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1( @@ -130,7 +130,7 @@ void test_vlseg5e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32m1(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_u32m1(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_m( @@ -151,7 +151,7 @@ void test_vlseg5e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m( @@ -172,7 +172,7 @@ void test_vlseg5e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_m( @@ -193,7 +193,7 @@ void test_vlseg5e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, size_t *new_vl, 
size_t vl) { - return vlseg5e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_m( @@ -214,7 +214,7 @@ void test_vlseg5e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m( @@ -235,7 +235,7 @@ void test_vlseg5e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m( @@ -256,6 +256,6 @@ void test_vlseg5e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64.c index 37712de9262e..391400f380f6 
100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, size_t vl) { - return vlseg5e64_v_f64m1(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e64_v_f64m1(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1( @@ -42,7 +42,7 @@ void test_vlseg5e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, size_t vl) { - return vlseg5e64_v_i64m1(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e64_v_i64m1(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1( @@ -61,7 +61,7 @@ void test_vlseg5e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, size_t vl) { - return vlseg5e64_v_u64m1(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e64_v_u64m1(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_m( @@ -80,7 +80,7 @@ void test_vlseg5e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, size_t vl) { - return vlseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m( @@ -99,7 +99,7 @@ void 
test_vlseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, size_t vl) { - return vlseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m( @@ -118,6 +118,6 @@ void test_vlseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, size_t vl) { - return vlseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c index 3d0e8606234c..78e27dc3c780 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e64ff.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_f64m1(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_f64m1(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1( @@ -46,7 +46,7 @@ void test_vlseg5e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, 
const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_i64m1(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_i64m1(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1( @@ -67,7 +67,7 @@ void test_vlseg5e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_u64m1(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_u64m1(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_m( @@ -88,7 +88,7 @@ void test_vlseg5e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_m( @@ -109,7 +109,7 @@ void test_vlseg5e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m( @@ -130,6 +130,6 @@ void test_vlseg5e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t 
*v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8.c index 120396587529..fe79f70edc0e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8.c @@ -22,7 +22,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf8(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e8_v_i8mf8(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4( @@ -41,7 +41,7 @@ void test_vlseg5e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf4(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e8_v_i8mf4(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2( @@ -60,7 +60,7 @@ void test_vlseg5e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf2(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e8_v_i8mf2(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1( @@ -79,7 +79,7 @@ void test_vlseg5e8_v_i8mf2(vint8mf2_t *v0, 
vint8mf2_t *v1, vint8mf2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8m1(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e8_v_i8m1(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8( @@ -98,7 +98,7 @@ void test_vlseg5e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf8(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e8_v_u8mf8(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4( @@ -117,7 +117,7 @@ void test_vlseg5e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf4(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e8_v_u8mf4(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2( @@ -136,7 +136,7 @@ void test_vlseg5e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf2(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e8_v_u8mf2(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1( @@ -155,7 +155,7 @@ void test_vlseg5e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, size_t vl) { - return 
vlseg5e8_v_u8m1(v0, v1, v2, v3, v4, base, vl); + return __riscv_vlseg5e8_v_u8m1(v0, v1, v2, v3, v4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_m( @@ -174,7 +174,7 @@ void test_vlseg5e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_m( @@ -193,7 +193,7 @@ void test_vlseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_m( @@ -212,7 +212,7 @@ void test_vlseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_m( @@ -231,7 +231,7 @@ void test_vlseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, 
mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m( @@ -250,7 +250,7 @@ void test_vlseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m( @@ -269,7 +269,7 @@ void test_vlseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m( @@ -288,7 +288,7 @@ void test_vlseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m( @@ -307,6 +307,6 @@ void test_vlseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, vl); + return __riscv_vlseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c index c31a01aaf15f..cb26d1b57dcc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg5e8ff.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf8(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf8(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4( @@ -46,7 +46,7 @@ void test_vlseg5e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf4(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf4(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2( @@ -67,7 +67,7 @@ void test_vlseg5e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf2(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf2(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1( @@ -88,7 +88,7 @@ void test_vlseg5e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t 
*v3, vint8m1_t *v4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8m1(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8m1(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8( @@ -109,7 +109,7 @@ void test_vlseg5e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf8(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf8(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4( @@ -130,7 +130,7 @@ void test_vlseg5e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf4(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf4(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2( @@ -151,7 +151,7 @@ void test_vlseg5e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf2(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf2(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1( @@ -172,7 +172,7 @@ void test_vlseg5e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, size_t *new_vl, 
size_t vl) { - return vlseg5e8ff_v_u8m1(v0, v1, v2, v3, v4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8m1(v0, v1, v2, v3, v4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_m( @@ -193,7 +193,7 @@ void test_vlseg5e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuin // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m( @@ -214,7 +214,7 @@ void test_vlseg5e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m( @@ -235,7 +235,7 @@ void test_vlseg5e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m( @@ -256,7 +256,7 @@ void test_vlseg5e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, 
vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m( @@ -277,7 +277,7 @@ void test_vlseg5e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m( @@ -298,7 +298,7 @@ void test_vlseg5e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m( @@ -319,7 +319,7 @@ void test_vlseg5e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m( @@ -340,6 +340,6 @@ void test_vlseg5e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vlseg5e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16.c index 4eab7bae122d..145e3bd88f4f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2( @@ -46,7 +46,7 @@ void test_vlseg6e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1( @@ -67,7 +67,7 @@ void test_vlseg6e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16m1(v0, 
v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4( @@ -88,7 +88,7 @@ void test_vlseg6e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2( @@ -109,7 +109,7 @@ void test_vlseg6e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1( @@ -130,7 +130,7 @@ void test_vlseg6e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16m1(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e16_v_i16m1(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4( @@ -151,7 +151,7 @@ void test_vlseg6e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e16_v_u16mf4(v0, v1, v2, v3, 
v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2( @@ -172,7 +172,7 @@ void test_vlseg6e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1( @@ -193,7 +193,7 @@ void test_vlseg6e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16m1(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e16_v_u16m1(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_m( @@ -214,7 +214,7 @@ void test_vlseg6e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_m( @@ -235,7 +235,7 @@ void test_vlseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return 
__riscv_vlseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_m( @@ -256,7 +256,7 @@ void test_vlseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_m( @@ -277,7 +277,7 @@ void test_vlseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_m( @@ -298,7 +298,7 @@ void test_vlseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_m( @@ -319,7 +319,7 @@ void test_vlseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, size_t vl) { - return 
vlseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m( @@ -340,7 +340,7 @@ void test_vlseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_m( @@ -361,7 +361,7 @@ void test_vlseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m( @@ -382,6 +382,6 @@ void test_vlseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c index 1ce0af87b187..756822378182 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e16ff.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2( @@ -50,7 +50,7 @@ void test_vlseg6e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1( @@ -73,7 +73,7 @@ void test_vlseg6e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4( @@ -96,7 +96,7 @@ void test_vlseg6e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, size_t 
*new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2( @@ -119,7 +119,7 @@ void test_vlseg6e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1( @@ -142,7 +142,7 @@ void test_vlseg6e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4( @@ -165,7 +165,7 @@ void test_vlseg6e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2( @@ -188,7 +188,7 @@ void test_vlseg6e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t 
*v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1( @@ -211,7 +211,7 @@ void test_vlseg6e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_m( @@ -234,7 +234,7 @@ void test_vlseg6e16ff_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_m( @@ -257,7 +257,7 @@ void test_vlseg6e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_m( @@ -280,7 +280,7 @@ void 
test_vlseg6e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m( @@ -303,7 +303,7 @@ void test_vlseg6e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m( @@ -326,7 +326,7 @@ void test_vlseg6e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_m( @@ -349,7 +349,7 @@ void test_vlseg6e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return 
vlseg6e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m( @@ -372,7 +372,7 @@ void test_vlseg6e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m( @@ -395,7 +395,7 @@ void test_vlseg6e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m( @@ -418,6 +418,6 @@ void test_vlseg6e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32.c index b200bbb5460f..a8e6c0c1d709 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, size_t vl) { - return vlseg6e32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1( @@ -46,7 +46,7 @@ void test_vlseg6e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, size_t vl) { - return vlseg6e32_v_f32m1(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e32_v_f32m1(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2( @@ -67,7 +67,7 @@ void test_vlseg6e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1( @@ -88,7 +88,7 @@ void test_vlseg6e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, size_t vl) { - return 
vlseg6e32_v_i32m1(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e32_v_i32m1(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2( @@ -109,7 +109,7 @@ void test_vlseg6e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1( @@ -130,7 +130,7 @@ void test_vlseg6e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32m1(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e32_v_u32m1(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_m( @@ -151,7 +151,7 @@ void test_vlseg6e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, size_t vl) { - return vlseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m( @@ -172,7 +172,7 @@ void test_vlseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, size_t vl) { - return 
vlseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m( @@ -193,7 +193,7 @@ void test_vlseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m( @@ -214,7 +214,7 @@ void test_vlseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_m( @@ -235,7 +235,7 @@ void test_vlseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m( @@ -256,6 +256,6 @@ void test_vlseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t 
*v5, vbool32_t mask, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c index 4ab705e4dec7..932fab44f71b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e32ff.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1( @@ -50,7 +50,7 @@ void test_vlseg6e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2( @@ -73,7 +73,7 @@ void test_vlseg6e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, 
vl); + return __riscv_vlseg6e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1( @@ -96,7 +96,7 @@ void test_vlseg6e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2( @@ -119,7 +119,7 @@ void test_vlseg6e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1( @@ -142,7 +142,7 @@ void test_vlseg6e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_m( @@ -165,7 +165,7 @@ void test_vlseg6e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const 
float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m( @@ -188,7 +188,7 @@ void test_vlseg6e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_m( @@ -211,7 +211,7 @@ void test_vlseg6e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_m( @@ -234,7 +234,7 @@ void test_vlseg6e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m( @@ -257,7 +257,7 @@ void test_vlseg6e32ff_v_i32m1_m(vint32m1_t *v0, 
vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m( @@ -280,6 +280,6 @@ void test_vlseg6e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64.c index c6774f1c6a04..6c97c6bab892 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, size_t vl) { - return vlseg6e64_v_f64m1(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e64_v_f64m1(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1( @@ -46,7 +46,7 @@ void test_vlseg6e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vlseg6e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, size_t vl) { - return vlseg6e64_v_i64m1(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e64_v_i64m1(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1( @@ -67,7 +67,7 @@ void test_vlseg6e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, size_t vl) { - return vlseg6e64_v_u64m1(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e64_v_u64m1(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_m( @@ -88,7 +88,7 @@ void test_vlseg6e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, size_t vl) { - return vlseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m( @@ -109,7 +109,7 @@ void test_vlseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, size_t vl) { - return vlseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m( @@ -130,6 +130,6 @@ void test_vlseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void 
test_vlseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, size_t vl) { - return vlseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c index 6ac666ab8c5f..f1e9d62ddd24 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e64ff.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1( @@ -50,7 +50,7 @@ void test_vlseg6e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1( @@ -73,7 +73,7 @@ void test_vlseg6e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t 
*base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_m( @@ -96,7 +96,7 @@ void test_vlseg6e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_m( @@ -119,7 +119,7 @@ void test_vlseg6e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_m( @@ -142,6 +142,6 @@ void test_vlseg6e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8.c index 01b1f73662ab..6ec75d36dede 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8.c @@ -24,7 +24,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4( @@ -45,7 +45,7 @@ void test_vlseg6e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2( @@ -66,7 +66,7 @@ void test_vlseg6e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1( @@ -87,7 +87,7 @@ void test_vlseg6e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8m1(v0, v1, v2, v3, v4, v5, base, vl); + return 
__riscv_vlseg6e8_v_i8m1(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8( @@ -108,7 +108,7 @@ void test_vlseg6e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4( @@ -129,7 +129,7 @@ void test_vlseg6e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2( @@ -150,7 +150,7 @@ void test_vlseg6e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1( @@ -171,7 +171,7 @@ void test_vlseg6e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8m1(v0, v1, v2, v3, v4, v5, base, vl); + return __riscv_vlseg6e8_v_u8m1(v0, v1, v2, v3, v4, v5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_m( @@ -192,7 
+192,7 @@ void test_vlseg6e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_m( @@ -213,7 +213,7 @@ void test_vlseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_m( @@ -234,7 +234,7 @@ void test_vlseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_m( @@ -255,7 +255,7 @@ void test_vlseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m( @@ -276,7 
+276,7 @@ void test_vlseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m( @@ -297,7 +297,7 @@ void test_vlseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m( @@ -318,7 +318,7 @@ void test_vlseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m( @@ -339,6 +339,6 @@ void test_vlseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); + return __riscv_vlseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c index 3720f925666e..c3f5840a4460 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg6e8ff.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4( @@ -50,7 +50,7 @@ void test_vlseg6e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2( @@ -73,7 +73,7 @@ void test_vlseg6e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1( @@ -96,7 +96,7 @@ void test_vlseg6e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin // CHECK-RV64-NEXT: ret void // void 
test_vlseg6e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8( @@ -119,7 +119,7 @@ void test_vlseg6e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4( @@ -142,7 +142,7 @@ void test_vlseg6e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2( @@ -165,7 +165,7 @@ void test_vlseg6e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1( @@ -188,7 +188,7 @@ void test_vlseg6e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // 
CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_m( @@ -211,7 +211,7 @@ void test_vlseg6e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuin // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m( @@ -234,7 +234,7 @@ void test_vlseg6e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m( @@ -257,7 +257,7 @@ void test_vlseg6e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg6e8ff_v_i8m1_m( @@ -280,7 +280,7 @@ void test_vlseg6e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m( @@ -303,7 +303,7 @@ void test_vlseg6e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m( @@ -326,7 +326,7 @@ void test_vlseg6e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m( @@ -349,7 +349,7 @@ void test_vlseg6e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return 
vlseg6e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_m( @@ -372,6 +372,6 @@ void test_vlseg6e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16.c index f1c7fe460897..c51042e434ae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2( @@ -50,7 +50,7 @@ void test_vlseg7e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); + return 
__riscv_vlseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1( @@ -73,7 +73,7 @@ void test_vlseg7e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4( @@ -96,7 +96,7 @@ void test_vlseg7e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2( @@ -119,7 +119,7 @@ void test_vlseg7e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1( @@ -142,7 +142,7 @@ void test_vlseg7e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, 
vl); + return __riscv_vlseg7e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4( @@ -165,7 +165,7 @@ void test_vlseg7e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2( @@ -188,7 +188,7 @@ void test_vlseg7e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1( @@ -211,7 +211,7 @@ void test_vlseg7e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_m( @@ -234,7 +234,7 @@ void test_vlseg7e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, 
size_t vl) { - return vlseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_m( @@ -257,7 +257,7 @@ void test_vlseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_m( @@ -280,7 +280,7 @@ void test_vlseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_m( @@ -303,7 +303,7 @@ void test_vlseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_m( @@ -326,7 +326,7 @@ void test_vlseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // 
void test_vlseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_m( @@ -349,7 +349,7 @@ void test_vlseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m( @@ -372,7 +372,7 @@ void test_vlseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_m( @@ -395,7 +395,7 @@ void test_vlseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m( @@ -418,6 +418,6 @@ void test_vlseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c index a0762903551b..123708307318 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e16ff.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2( @@ -54,7 +54,7 @@ void test_vlseg7e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg7e16ff_v_f16m1( @@ -79,7 +79,7 @@ void test_vlseg7e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4( @@ -104,7 +104,7 @@ void test_vlseg7e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2( @@ -129,7 +129,7 @@ void test_vlseg7e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1( @@ -154,7 +154,7 @@ void test_vlseg7e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, size_t *new_vl, size_t vl) { - 
return vlseg7e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4( @@ -179,7 +179,7 @@ void test_vlseg7e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2( @@ -204,7 +204,7 @@ void test_vlseg7e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1( @@ -229,7 +229,7 @@ void test_vlseg7e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_m( @@ -254,7 +254,7 @@ void test_vlseg7e16ff_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vlseg7e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_m( @@ -279,7 +279,7 @@ void test_vlseg7e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_m( @@ -304,7 +304,7 @@ void test_vlseg7e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m( @@ -329,7 +329,7 @@ void test_vlseg7e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - 
return vlseg7e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m( @@ -354,7 +354,7 @@ void test_vlseg7e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_m( @@ -379,7 +379,7 @@ void test_vlseg7e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m( @@ -404,7 +404,7 @@ void test_vlseg7e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m( @@ -429,7 +429,7 @@ void 
test_vlseg7e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m( @@ -454,6 +454,6 @@ void test_vlseg7e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32.c index 5ee74619bcce..73af9f482f9c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, size_t vl) { - return vlseg7e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1( @@ -50,7 +50,7 @@ void 
test_vlseg7e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, size_t vl) { - return vlseg7e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2( @@ -73,7 +73,7 @@ void test_vlseg7e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1( @@ -96,7 +96,7 @@ void test_vlseg7e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2( @@ -119,7 +119,7 @@ void test_vlseg7e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1( @@ 
-142,7 +142,7 @@ void test_vlseg7e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_m( @@ -165,7 +165,7 @@ void test_vlseg7e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, size_t vl) { - return vlseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m( @@ -188,7 +188,7 @@ void test_vlseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, size_t vl) { - return vlseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m( @@ -211,7 +211,7 @@ void test_vlseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, 
base, vl); + return __riscv_vlseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m( @@ -234,7 +234,7 @@ void test_vlseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m( @@ -257,7 +257,7 @@ void test_vlseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m( @@ -280,6 +280,6 @@ void test_vlseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c index db505f155748..134b2ff0c4fa 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e32ff.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1( @@ -54,7 +54,7 @@ void test_vlseg7e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2( @@ -79,7 +79,7 @@ void test_vlseg7e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1( @@ -104,7 +104,7 @@ void test_vlseg7e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t 
*v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2( @@ -129,7 +129,7 @@ void test_vlseg7e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1( @@ -154,7 +154,7 @@ void test_vlseg7e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_m( @@ -179,7 +179,7 @@ void test_vlseg7e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m( @@ -204,7 
+204,7 @@ void test_vlseg7e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_m( @@ -229,7 +229,7 @@ void test_vlseg7e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_m( @@ -254,7 +254,7 @@ void test_vlseg7e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m( @@ -279,7 +279,7 @@ void test_vlseg7e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, 
vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m( @@ -304,6 +304,6 @@ void test_vlseg7e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64.c index 7b6eb3e3542f..2e6ebf1ce92a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, size_t vl) { - return vlseg7e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1( @@ -50,7 +50,7 @@ void test_vlseg7e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, 
size_t vl) { - return vlseg7e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1( @@ -73,7 +73,7 @@ void test_vlseg7e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, size_t vl) { - return vlseg7e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_m( @@ -96,7 +96,7 @@ void test_vlseg7e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, size_t vl) { - return vlseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m( @@ -119,7 +119,7 @@ void test_vlseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, size_t vl) { - return vlseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_m( @@ -142,6 +142,6 @@ void test_vlseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, 
vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, size_t vl) { - return vlseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c index 233bb52c9297..beae1e0cdd5a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e64ff.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1( @@ -54,7 +54,7 @@ void test_vlseg7e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1( @@ -79,7 +79,7 @@ void test_vlseg7e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, 
const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_m( @@ -104,7 +104,7 @@ void test_vlseg7e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_m( @@ -129,7 +129,7 @@ void test_vlseg7e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m( @@ -154,6 +154,6 @@ void test_vlseg7e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8.c index f3a5baef4b80..35a7e9738988 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8.c @@ -26,7 +26,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4( @@ -49,7 +49,7 @@ void test_vlseg7e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2( @@ -72,7 +72,7 @@ void test_vlseg7e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1( @@ -95,7 +95,7 @@ void test_vlseg7e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t 
*v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8( @@ -118,7 +118,7 @@ void test_vlseg7e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4( @@ -141,7 +141,7 @@ void test_vlseg7e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2( @@ -164,7 +164,7 @@ void test_vlseg7e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1( @@ -187,7 +187,7 @@ void test_vlseg7e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, 
vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, vl); + return __riscv_vlseg7e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_m( @@ -210,7 +210,7 @@ void test_vlseg7e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_m( @@ -233,7 +233,7 @@ void test_vlseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_m( @@ -256,7 +256,7 @@ void test_vlseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_m( @@ -279,7 +279,7 @@ void test_vlseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin // CHECK-RV64-NEXT: ret void // void 
test_vlseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m( @@ -302,7 +302,7 @@ void test_vlseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m( @@ -325,7 +325,7 @@ void test_vlseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m( @@ -348,7 +348,7 @@ void test_vlseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m( @@ -371,6 +371,6 @@ 
void test_vlseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); + return __riscv_vlseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c index 476363946bb0..a0890bb9909d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg7e8ff.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4( @@ -54,7 +54,7 @@ void test_vlseg7e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2( @@ -79,7 +79,7 @@ void test_vlseg7e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin // CHECK-RV64-NEXT: ret void // 
void test_vlseg7e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1( @@ -104,7 +104,7 @@ void test_vlseg7e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8( @@ -129,7 +129,7 @@ void test_vlseg7e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4( @@ -154,7 +154,7 @@ void test_vlseg7e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2( @@ 
-179,7 +179,7 @@ void test_vlseg7e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1( @@ -204,7 +204,7 @@ void test_vlseg7e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_m( @@ -229,7 +229,7 @@ void test_vlseg7e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuin // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_m( @@ -254,7 +254,7 @@ void test_vlseg7e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return 
vlseg7e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m( @@ -279,7 +279,7 @@ void test_vlseg7e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m( @@ -304,7 +304,7 @@ void test_vlseg7e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m( @@ -329,7 +329,7 @@ void test_vlseg7e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m( @@ -354,7 +354,7 @@ void test_vlseg7e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, 
vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m( @@ -379,7 +379,7 @@ void test_vlseg7e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m( @@ -404,6 +404,6 @@ void test_vlseg7e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16.c index b16c3fab61da..01016492409d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2( @@ -54,7 +54,7 @@ void test_vlseg8e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1( @@ -79,7 +79,7 @@ void test_vlseg8e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4( @@ -104,7 +104,7 @@ void test_vlseg8e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, size_t vl) { - return 
vlseg8e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2( @@ -129,7 +129,7 @@ void test_vlseg8e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1( @@ -154,7 +154,7 @@ void test_vlseg8e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4( @@ -179,7 +179,7 @@ void test_vlseg8e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2( @@ -204,7 +204,7 @@ void test_vlseg8e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, 
vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1( @@ -229,7 +229,7 @@ void test_vlseg8e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_m( @@ -254,7 +254,7 @@ void test_vlseg8e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_m( @@ -279,7 +279,7 @@ void test_vlseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: 
@test_vlseg8e16_v_f16m1_m( @@ -304,7 +304,7 @@ void test_vlseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_m( @@ -329,7 +329,7 @@ void test_vlseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_m( @@ -354,7 +354,7 @@ void test_vlseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_m( @@ -379,7 +379,7 @@ void test_vlseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, 
vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m( @@ -404,7 +404,7 @@ void test_vlseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_m( @@ -429,7 +429,7 @@ void test_vlseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m( @@ -454,6 +454,6 @@ void test_vlseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c index c4df021c5a0b..bb9010273ac6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e16ff.c @@ -31,7 +31,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2( @@ -58,7 +58,7 @@ void test_vlseg8e16ff_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1( @@ -85,7 +85,7 @@ void test_vlseg8e16ff_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16m1(v0, 
v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4( @@ -112,7 +112,7 @@ void test_vlseg8e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2( @@ -139,7 +139,7 @@ void test_vlseg8e16ff_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1( @@ -166,7 +166,7 @@ void test_vlseg8e16ff_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4( @@ -193,7 +193,7 @@ void test_vlseg8e16ff_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2( @@ -220,7 +220,7 @@ void test_vlseg8e16ff_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1( @@ -247,7 +247,7 @@ void test_vlseg8e16ff_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_m( @@ -274,7 +274,7 @@ void test_vlseg8e16ff_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, 
vl); + return __riscv_vlseg8e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_m( @@ -301,7 +301,7 @@ void test_vlseg8e16ff_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_m( @@ -328,7 +328,7 @@ void test_vlseg8e16ff_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m( @@ -355,7 +355,7 @@ void test_vlseg8e16ff_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m( 
@@ -382,7 +382,7 @@ void test_vlseg8e16ff_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_m( @@ -409,7 +409,7 @@ void test_vlseg8e16ff_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m( @@ -436,7 +436,7 @@ void test_vlseg8e16ff_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m( @@ -463,7 +463,7 @@ void test_vlseg8e16ff_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf2_m(vuint16mf2_t 
*v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m( @@ -490,6 +490,6 @@ void test_vlseg8e16ff_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32.c index 09bdbbede673..ac6354a458d1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, size_t vl) { - return vlseg8e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1( @@ -54,7 +54,7 @@ void test_vlseg8e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t // 
CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, size_t vl) { - return vlseg8e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2( @@ -79,7 +79,7 @@ void test_vlseg8e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1( @@ -104,7 +104,7 @@ void test_vlseg8e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2( @@ -129,7 +129,7 @@ void test_vlseg8e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: 
@test_vlseg8e32_v_u32m1( @@ -154,7 +154,7 @@ void test_vlseg8e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_m( @@ -179,7 +179,7 @@ void test_vlseg8e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, size_t vl) { - return vlseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m( @@ -204,7 +204,7 @@ void test_vlseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, size_t vl) { - return vlseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m( @@ -229,7 +229,7 @@ void test_vlseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, 
vint32mf2_t *v7, vbool64_t mask, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m( @@ -254,7 +254,7 @@ void test_vlseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m( @@ -279,7 +279,7 @@ void test_vlseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m( @@ -304,6 +304,6 @@ void test_vlseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c index a6b550bdc42b..6e6cbf6884fc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e32ff.c @@ -31,7 +31,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1( @@ -58,7 +58,7 @@ void test_vlseg8e32ff_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2( @@ -85,7 +85,7 @@ void test_vlseg8e32ff_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, 
v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1( @@ -112,7 +112,7 @@ void test_vlseg8e32ff_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2( @@ -139,7 +139,7 @@ void test_vlseg8e32ff_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1( @@ -166,7 +166,7 @@ void test_vlseg8e32ff_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_m( @@ -193,7 +193,7 @@ void test_vlseg8e32ff_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t 
*v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_m( @@ -220,7 +220,7 @@ void test_vlseg8e32ff_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_m( @@ -247,7 +247,7 @@ void test_vlseg8e32ff_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_m( @@ -274,7 +274,7 @@ void test_vlseg8e32ff_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return 
vlseg8e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m( @@ -301,7 +301,7 @@ void test_vlseg8e32ff_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m( @@ -328,6 +328,6 @@ void test_vlseg8e32ff_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64.c index 2b25009ab274..d041e915e4e6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, 
vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, size_t vl) { - return vlseg8e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1( @@ -54,7 +54,7 @@ void test_vlseg8e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, size_t vl) { - return vlseg8e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1( @@ -79,7 +79,7 @@ void test_vlseg8e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, size_t vl) { - return vlseg8e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_m( @@ -104,7 +104,7 @@ void test_vlseg8e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, size_t vl) { - return vlseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m( @@ -129,7 +129,7 @@ void test_vlseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * 
// CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, size_t vl) { - return vlseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m( @@ -154,6 +154,6 @@ void test_vlseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, size_t vl) { - return vlseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c index 7a6645c15e13..1fb1692bcf1e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e64ff.c @@ -31,7 +31,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1( @@ -58,7 +58,7 @@ void test_vlseg8e64ff_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, 
vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1( @@ -85,7 +85,7 @@ void test_vlseg8e64ff_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_m( @@ -112,7 +112,7 @@ void test_vlseg8e64ff_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_m( @@ -139,7 +139,7 @@ void test_vlseg8e64ff_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, size_t *new_vl, 
size_t vl) { - return vlseg8e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m( @@ -166,6 +166,6 @@ void test_vlseg8e64ff_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8.c index 193ae7913090..c1ac554b89c6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8.c @@ -28,7 +28,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4( @@ -53,7 +53,7 @@ void test_vlseg8e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, size_t vl) { - return 
vlseg8e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2( @@ -78,7 +78,7 @@ void test_vlseg8e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1( @@ -103,7 +103,7 @@ void test_vlseg8e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8( @@ -128,7 +128,7 @@ void test_vlseg8e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4( @@ -153,7 +153,7 @@ void test_vlseg8e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, 
vuint8mf4_t *v7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2( @@ -178,7 +178,7 @@ void test_vlseg8e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1( @@ -203,7 +203,7 @@ void test_vlseg8e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); + return __riscv_vlseg8e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_m( @@ -228,7 +228,7 @@ void test_vlseg8e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_m( @@ -253,7 +253,7 @@ void test_vlseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin // CHECK-RV64-NEXT: ret void // void 
test_vlseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_m( @@ -278,7 +278,7 @@ void test_vlseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_m( @@ -303,7 +303,7 @@ void test_vlseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m( @@ -328,7 +328,7 @@ void test_vlseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m( @@ -353,7 +353,7 @@ void test_vlseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m( @@ -378,7 +378,7 @@ void test_vlseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m( @@ -403,6 +403,6 @@ void test_vlseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); + return __riscv_vlseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c index 6aee11d46d61..45325b57152e 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg8e8ff.c @@ -31,7 +31,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4( @@ -58,7 +58,7 @@ void test_vlseg8e8ff_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2( @@ -85,7 +85,7 @@ void test_vlseg8e8ff_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1( @@ -112,7 +112,7 @@ void test_vlseg8e8ff_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t 
*v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8( @@ -139,7 +139,7 @@ void test_vlseg8e8ff_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4( @@ -166,7 +166,7 @@ void test_vlseg8e8ff_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2( @@ -193,7 +193,7 @@ void test_vlseg8e8ff_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1( 
@@ -220,7 +220,7 @@ void test_vlseg8e8ff_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_m( @@ -247,7 +247,7 @@ void test_vlseg8e8ff_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuin // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m( @@ -274,7 +274,7 @@ void test_vlseg8e8ff_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m( @@ -301,7 +301,7 @@ void test_vlseg8e8ff_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, 
vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vlseg8e8ff_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m( @@ -355,7 +355,7 @@ void test_vlseg8e8ff_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m( @@ -382,7 +382,7 @@ void test_vlseg8e8ff_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return 
__riscv_vlseg8e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vlseg8e8ff_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m( @@ -436,6 +436,6 @@ void test_vlseg8e8ff_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c index 8643592475bb..82f2c67d2c71 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16mf4(v0, v1, base, 
bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2( @@ -30,7 +30,7 @@ void test_vlsseg2e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16mf2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1( @@ -43,7 +43,7 @@ void test_vlsseg2e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m1(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m1(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2( @@ -56,7 +56,7 @@ void test_vlsseg2e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4( @@ -69,7 +69,7 @@ void test_vlsseg2e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4( @@ -82,7 +82,7 @@ void test_vlsseg2e16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg2e16_v_i16mf4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16mf4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2( @@ -95,7 +95,7 @@ void test_vlsseg2e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16mf2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16mf2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1( @@ -108,7 +108,7 @@ void test_vlsseg2e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m1(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m1(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2( @@ -121,7 +121,7 @@ void test_vlsseg2e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4( @@ -134,7 +134,7 @@ void test_vlsseg2e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4( @@ -147,7 +147,7 @@ void test_vlsseg2e16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base // CHECK-RV64-NEXT: ret void // void 
test_vlsseg2e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16mf4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2( @@ -160,7 +160,7 @@ void test_vlsseg2e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16mf2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1( @@ -173,7 +173,7 @@ void test_vlsseg2e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m1(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m1(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2( @@ -186,7 +186,7 @@ void test_vlsseg2e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4( @@ -199,7 +199,7 @@ void test_vlsseg2e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_m( @@ -212,7 +212,7 @@ void 
test_vlsseg2e16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16mf4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_m( @@ -225,7 +225,7 @@ void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_m( @@ -238,7 +238,7 @@ void test_vlsseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m1_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_m( @@ -251,7 +251,7 @@ void test_vlsseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_m( @@ -264,7 +264,7 @@ void test_vlsseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m4_m(vfloat16m4_t *v0, 
vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_m( @@ -277,7 +277,7 @@ void test_vlsseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16mf4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16mf4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_m( @@ -290,7 +290,7 @@ void test_vlsseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16mf2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_m( @@ -303,7 +303,7 @@ void test_vlsseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m1_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_m( @@ -316,7 +316,7 @@ void test_vlsseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m2_m(v0, v1, mask, base, bstride, vl); + return 
__riscv_vlsseg2e16_v_i16m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_m( @@ -329,7 +329,7 @@ void test_vlsseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, co // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_m( @@ -342,7 +342,7 @@ void test_vlsseg2e16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, co // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16mf4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_m( @@ -355,7 +355,7 @@ void test_vlsseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_m( @@ -368,7 +368,7 @@ void test_vlsseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m1_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_m( @@ -381,7 +381,7 @@ void 
test_vlsseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_m( @@ -394,6 +394,6 @@ void test_vlsseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m4_m(v0, v1, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32.c index 6e9689a63e7d..cb29a7cdc6c0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32mf2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32mf2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1( @@ -30,7 +30,7 @@ void test_vlsseg2e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m1(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m1(v0, v1, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2( @@ -43,7 +43,7 @@ void test_vlsseg2e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *ba // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4( @@ -56,7 +56,7 @@ void test_vlsseg2e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *ba // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2( @@ -69,7 +69,7 @@ void test_vlsseg2e32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *ba // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32mf2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32mf2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1( @@ -82,7 +82,7 @@ void test_vlsseg2e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m1(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m1(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2( @@ -95,7 +95,7 @@ void test_vlsseg2e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m2(v0, v1, base, 
bstride, vl); + return __riscv_vlsseg2e32_v_i32m2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4( @@ -108,7 +108,7 @@ void test_vlsseg2e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2( @@ -121,7 +121,7 @@ void test_vlsseg2e32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32mf2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32mf2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1( @@ -134,7 +134,7 @@ void test_vlsseg2e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m1(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m1(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2( @@ -147,7 +147,7 @@ void test_vlsseg2e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4( @@ -160,7 +160,7 @@ void test_vlsseg2e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m4(vuint32m4_t *v0, vuint32m4_t 
*v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_m( @@ -173,7 +173,7 @@ void test_vlsseg2e32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32mf2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_m( @@ -186,7 +186,7 @@ void test_vlsseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m1_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_m( @@ -199,7 +199,7 @@ void test_vlsseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_m( @@ -212,7 +212,7 @@ void test_vlsseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m4_m(v0, v1, mask, base, bstride, vl); 
} // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_m( @@ -225,7 +225,7 @@ void test_vlsseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32mf2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_m( @@ -238,7 +238,7 @@ void test_vlsseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m1_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_m( @@ -251,7 +251,7 @@ void test_vlsseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_m( @@ -264,7 +264,7 @@ void test_vlsseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_m( @@ -277,7 +277,7 @@ void test_vlsseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, co // CHECK-RV64-NEXT: ret void // 
void test_vlsseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32mf2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_m( @@ -290,7 +290,7 @@ void test_vlsseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m1_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_m( @@ -303,7 +303,7 @@ void test_vlsseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_m( @@ -316,6 +316,6 @@ void test_vlsseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m4_m(v0, v1, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c index 5f89e4fc0db9..e53719853924 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m1(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m1(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2( @@ -30,7 +30,7 @@ void test_vlsseg2e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4( @@ -43,7 +43,7 @@ void test_vlsseg2e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1( @@ -56,7 +56,7 @@ void test_vlsseg2e64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m1(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m1(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2( @@ -69,7 +69,7 @@ void test_vlsseg2e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m2(v0, v1, base, bstride, vl); + return 
__riscv_vlsseg2e64_v_i64m2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4( @@ -82,7 +82,7 @@ void test_vlsseg2e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1( @@ -95,7 +95,7 @@ void test_vlsseg2e64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m1(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m1(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2( @@ -108,7 +108,7 @@ void test_vlsseg2e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4( @@ -121,7 +121,7 @@ void test_vlsseg2e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1_m( @@ -134,7 +134,7 @@ void test_vlsseg2e64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *b // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const 
double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m1_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_m( @@ -147,7 +147,7 @@ void test_vlsseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_m( @@ -160,7 +160,7 @@ void test_vlsseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_m( @@ -173,7 +173,7 @@ void test_vlsseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m1_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_m( @@ -186,7 +186,7 @@ void test_vlsseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m2_m(v0, v1, mask, base, bstride, vl); } 
// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_m( @@ -199,7 +199,7 @@ void test_vlsseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_m( @@ -212,7 +212,7 @@ void test_vlsseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, c // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m1_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_m( @@ -225,7 +225,7 @@ void test_vlsseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_m( @@ -238,6 +238,6 @@ void test_vlsseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m4_m(v0, v1, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e8.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e8.c index ba42f3aed9db..0341e7dd8ced 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e8.c @@ -16,7 +16,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf8(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf8(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4( @@ -29,7 +29,7 @@ void test_vlsseg2e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2( @@ -42,7 +42,7 @@ void test_vlsseg2e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1( @@ -55,7 +55,7 @@ void test_vlsseg2e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m1(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m1(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2( @@ -68,7 +68,7 @@ void test_vlsseg2e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, ptr // CHECK-RV64-NEXT: ret 
void // void test_vlsseg2e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4( @@ -81,7 +81,7 @@ void test_vlsseg2e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, ptr // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8( @@ -94,7 +94,7 @@ void test_vlsseg2e8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, ptr // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf8(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf8(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4( @@ -107,7 +107,7 @@ void test_vlsseg2e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2( @@ -120,7 +120,7 @@ void test_vlsseg2e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1( @@ -133,7 +133,7 @@ void test_vlsseg2e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t 
*v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m1(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m1(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2( @@ -146,7 +146,7 @@ void test_vlsseg2e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m2(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m2(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4( @@ -159,7 +159,7 @@ void test_vlsseg2e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m4(v0, v1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m4(v0, v1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_m( @@ -172,7 +172,7 @@ void test_vlsseg2e8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf8_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf8_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_m( @@ -185,7 +185,7 @@ void test_vlsseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, co // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf4_m(v0, v1, mask, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_m( @@ -198,7 +198,7 @@ void test_vlsseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, co // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_m( @@ -211,7 +211,7 @@ void test_vlsseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, co // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m1_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_m( @@ -224,7 +224,7 @@ void test_vlsseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_m( @@ -237,7 +237,7 @@ void test_vlsseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_m( @@ -250,7 +250,7 @@ void test_vlsseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf8_m(vuint8mf8_t *v0, 
vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf8_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf8_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_m( @@ -263,7 +263,7 @@ void test_vlsseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf4_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_m( @@ -276,7 +276,7 @@ void test_vlsseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf2_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_m( @@ -289,7 +289,7 @@ void test_vlsseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m1_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m1_m(v0, v1, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_m( @@ -302,7 +302,7 @@ void test_vlsseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, cons // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m2_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m2_m(v0, v1, mask, base, bstride, 
vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_m( @@ -315,6 +315,6 @@ void test_vlsseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, cons // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m4_m(v0, v1, mask, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m4_m(v0, v1, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c index 8e13916478ae..708b02e9c0cc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf4(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16mf4(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2( @@ -34,7 +34,7 @@ void test_vlsseg3e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16mf2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1( @@ -49,7 +49,7 @@ void test_vlsseg3e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg3e16_v_f16m1(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16m1(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2( @@ -64,7 +64,7 @@ void test_vlsseg3e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16m2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4( @@ -79,7 +79,7 @@ void test_vlsseg3e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf4(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16mf4(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2( @@ -94,7 +94,7 @@ void test_vlsseg3e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16mf2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1( @@ -109,7 +109,7 @@ void test_vlsseg3e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m1(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16m1(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2( @@ -124,7 +124,7 @@ void 
test_vlsseg3e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, con // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16m2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4( @@ -139,7 +139,7 @@ void test_vlsseg3e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, con // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf4(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16mf4(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2( @@ -154,7 +154,7 @@ void test_vlsseg3e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16mf2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1( @@ -169,7 +169,7 @@ void test_vlsseg3e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m1(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16m1(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2( @@ -184,7 +184,7 @@ void test_vlsseg3e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, 
ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16m2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_m( @@ -199,7 +199,7 @@ void test_vlsseg3e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf4_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16mf4_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_m( @@ -214,7 +214,7 @@ void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_m( @@ -229,7 +229,7 @@ void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m1_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_m( @@ -244,7 +244,7 @@ void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m2_m(v0, v1, 
v2, mask, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16m2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_m( @@ -259,7 +259,7 @@ void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf4_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16mf4_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_m( @@ -274,7 +274,7 @@ void test_vlsseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_m( @@ -289,7 +289,7 @@ void test_vlsseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m1_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_m( @@ -304,7 +304,7 @@ void test_vlsseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16m2_m(v0, v1, v2, 
mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_m( @@ -319,7 +319,7 @@ void test_vlsseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf4_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16mf4_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_m( @@ -334,7 +334,7 @@ void test_vlsseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_m( @@ -349,7 +349,7 @@ void test_vlsseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m1_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_m( @@ -364,6 +364,6 @@ void test_vlsseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16m2_m(v0, v1, v2, mask, base, bstride, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c index 614d805d0c1a..b6584fbffac0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32mf2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32mf2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1( @@ -34,7 +34,7 @@ void test_vlsseg3e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m1(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32m1(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2( @@ -49,7 +49,7 @@ void test_vlsseg3e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32m2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2( @@ -64,7 +64,7 @@ void test_vlsseg3e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32mf2(v0, v1, v2, base, 
bstride, vl); + return __riscv_vlsseg3e32_v_i32mf2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1( @@ -79,7 +79,7 @@ void test_vlsseg3e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m1(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32m1(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2( @@ -94,7 +94,7 @@ void test_vlsseg3e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, con // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32m2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2( @@ -109,7 +109,7 @@ void test_vlsseg3e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, con // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32mf2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32mf2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1( @@ -124,7 +124,7 @@ void test_vlsseg3e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m1(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32m1(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2( @@ -139,7 +139,7 @@ void test_vlsseg3e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t 
*v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32m2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_m( @@ -154,7 +154,7 @@ void test_vlsseg3e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32mf2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_m( @@ -169,7 +169,7 @@ void test_vlsseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m1_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_m( @@ -184,7 +184,7 @@ void test_vlsseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32m2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_m( @@ -199,7 +199,7 @@ void test_vlsseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, 
vint32mf2_t *v2, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32mf2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_m( @@ -214,7 +214,7 @@ void test_vlsseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m1_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_m( @@ -229,7 +229,7 @@ void test_vlsseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32m2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_m( @@ -244,7 +244,7 @@ void test_vlsseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32mf2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_m( @@ -259,7 +259,7 @@ void test_vlsseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, 
size_t vl) { - return vlsseg3e32_v_u32m1_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_m( @@ -274,6 +274,6 @@ void test_vlsseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32m2_m(v0, v1, v2, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c index 441151edd081..73e8a564775b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m1(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e64_v_f64m1(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2( @@ -34,7 +34,7 @@ void test_vlsseg3e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e64_v_f64m2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1( @@ -49,7 +49,7 @@ void test_vlsseg3e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v // 
CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m1(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e64_v_i64m1(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2( @@ -64,7 +64,7 @@ void test_vlsseg3e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, con // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e64_v_i64m2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1( @@ -79,7 +79,7 @@ void test_vlsseg3e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, con // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m1(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e64_v_u64m1(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2( @@ -94,7 +94,7 @@ void test_vlsseg3e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e64_v_u64m2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1_m( @@ -109,7 +109,7 @@ void test_vlsseg3e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m1_m(v0, v1, v2, mask, base, 
bstride, vl); + return __riscv_vlsseg3e64_v_f64m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_m( @@ -124,7 +124,7 @@ void test_vlsseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e64_v_f64m2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_m( @@ -139,7 +139,7 @@ void test_vlsseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m1_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e64_v_i64m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_m( @@ -154,7 +154,7 @@ void test_vlsseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e64_v_i64m2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_m( @@ -169,7 +169,7 @@ void test_vlsseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m1_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e64_v_u64m1_m(v0, v1, v2, mask, base, bstride, 
vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_m( @@ -184,6 +184,6 @@ void test_vlsseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e64_v_u64m2_m(v0, v1, v2, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c index 2af160fc427c..eec4edc5c11b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c @@ -18,7 +18,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf8(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf8(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4( @@ -33,7 +33,7 @@ void test_vlsseg3e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, cons // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf4(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf4(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2( @@ -48,7 +48,7 @@ void test_vlsseg3e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, cons // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf2(v0, 
v1, v2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1( @@ -63,7 +63,7 @@ void test_vlsseg3e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, cons // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m1(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8m1(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2( @@ -78,7 +78,7 @@ void test_vlsseg3e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const in // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8m2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8( @@ -93,7 +93,7 @@ void test_vlsseg3e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const in // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf8(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf8(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4( @@ -108,7 +108,7 @@ void test_vlsseg3e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf4(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf4(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2( @@ -123,7 +123,7 @@ void test_vlsseg3e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, c // CHECK-RV64-NEXT: ret 
void // void test_vlsseg3e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1( @@ -138,7 +138,7 @@ void test_vlsseg3e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m1(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8m1(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2( @@ -153,7 +153,7 @@ void test_vlsseg3e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m2(v0, v1, v2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8m2(v0, v1, v2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_m( @@ -168,7 +168,7 @@ void test_vlsseg3e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf8_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf8_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_m( @@ -183,7 +183,7 @@ void test_vlsseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf4_m(v0, v1, v2, mask, base, bstride, vl); + 
return __riscv_vlsseg3e8_v_i8mf4_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_m( @@ -198,7 +198,7 @@ void test_vlsseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_m( @@ -213,7 +213,7 @@ void test_vlsseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m1_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_m( @@ -228,7 +228,7 @@ void test_vlsseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8m2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_m( @@ -243,7 +243,7 @@ void test_vlsseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf8_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf8_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_m( @@ 
-258,7 +258,7 @@ void test_vlsseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf4_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf4_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_m( @@ -273,7 +273,7 @@ void test_vlsseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf2_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_m( @@ -288,7 +288,7 @@ void test_vlsseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m1_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8m1_m(v0, v1, v2, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_m( @@ -303,6 +303,6 @@ void test_vlsseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m2_m(v0, v1, v2, mask, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8m2_m(v0, v1, v2, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c index cd6e0a179735..4a54cac63e99 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf4(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16mf4(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2( @@ -38,7 +38,7 @@ void test_vlsseg4e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16mf2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1( @@ -55,7 +55,7 @@ void test_vlsseg4e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m1(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16m1(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2( @@ -72,7 +72,7 @@ void test_vlsseg4e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m2(v0, 
v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16m2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4( @@ -89,7 +89,7 @@ void test_vlsseg4e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf4(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16mf4(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2( @@ -106,7 +106,7 @@ void test_vlsseg4e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16mf2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1( @@ -123,7 +123,7 @@ void test_vlsseg4e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m1(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16m1(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2( @@ -140,7 +140,7 @@ void test_vlsseg4e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16m2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlsseg4e16_v_u16mf4( @@ -157,7 +157,7 @@ void test_vlsseg4e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf4(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16mf4(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2( @@ -174,7 +174,7 @@ void test_vlsseg4e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16mf2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1( @@ -191,7 +191,7 @@ void test_vlsseg4e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m1(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16m1(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2( @@ -208,7 +208,7 @@ void test_vlsseg4e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16m2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_m( @@ -225,7 +225,7 @@ void test_vlsseg4e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t 
*v1, vuint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_m( @@ -242,7 +242,7 @@ void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_m( @@ -259,7 +259,7 @@ void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_m( @@ -276,7 +276,7 @@ void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_m( @@ -293,7 +293,7 @@ void 
test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_m( @@ -310,7 +310,7 @@ void test_vlsseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_m( @@ -327,7 +327,7 @@ void test_vlsseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_m( @@ -344,7 +344,7 @@ void test_vlsseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_m( @@ -361,7 
+361,7 @@ void test_vlsseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_m( @@ -378,7 +378,7 @@ void test_vlsseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_m( @@ -395,7 +395,7 @@ void test_vlsseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_m( @@ -412,6 +412,6 @@ void test_vlsseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c index e839d2061bcb..1cd49c509bd0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32mf2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32mf2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1( @@ -38,7 +38,7 @@ void test_vlsseg4e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m1(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32m1(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2( @@ -55,7 +55,7 @@ void test_vlsseg4e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32m2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2( @@ -72,7 +72,7 @@ void test_vlsseg4e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const 
int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32mf2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32mf2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1( @@ -89,7 +89,7 @@ void test_vlsseg4e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m1(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32m1(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2( @@ -106,7 +106,7 @@ void test_vlsseg4e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32m2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2( @@ -123,7 +123,7 @@ void test_vlsseg4e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32mf2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32mf2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1( @@ -140,7 +140,7 @@ void test_vlsseg4e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m1(v0, v1, v2, v3, base, bstride, vl); + return 
__riscv_vlsseg4e32_v_u32m1(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2( @@ -157,7 +157,7 @@ void test_vlsseg4e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32m2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_m( @@ -174,7 +174,7 @@ void test_vlsseg4e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_m( @@ -191,7 +191,7 @@ void test_vlsseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_m( @@ -208,7 +208,7 @@ void test_vlsseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return 
__riscv_vlsseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_m( @@ -225,7 +225,7 @@ void test_vlsseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_m( @@ -242,7 +242,7 @@ void test_vlsseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_m( @@ -259,7 +259,7 @@ void test_vlsseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_m( @@ -276,7 +276,7 @@ void test_vlsseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, base, 
bstride, vl); + return __riscv_vlsseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_m( @@ -293,7 +293,7 @@ void test_vlsseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_m( @@ -310,6 +310,6 @@ void test_vlsseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c index 546a89d61f04..33cc59372bd0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m1(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e64_v_f64m1(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2( @@ -38,7 +38,7 @@ void test_vlsseg4e64_v_f64m1(vfloat64m1_t *v0, 
vfloat64m1_t *v1, vfloat64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e64_v_f64m2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1( @@ -55,7 +55,7 @@ void test_vlsseg4e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m1(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e64_v_i64m1(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2( @@ -72,7 +72,7 @@ void test_vlsseg4e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e64_v_i64m2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1( @@ -89,7 +89,7 @@ void test_vlsseg4e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_u64m1(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e64_v_u64m1(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2( @@ -106,7 +106,7 @@ void test_vlsseg4e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, 
vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_u64m2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e64_v_u64m2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1_m( @@ -123,7 +123,7 @@ void test_vlsseg4e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_m( @@ -140,7 +140,7 @@ void test_vlsseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_m( @@ -157,7 +157,7 @@ void test_vlsseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_m( @@ -174,7 +174,7 @@ void test_vlsseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, 
vint64m2_t *v3, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_m( @@ -191,7 +191,7 @@ void test_vlsseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_m( @@ -208,6 +208,6 @@ void test_vlsseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c index 7b5af168dd80..ebd4a8c2b0e1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c @@ -20,7 +20,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf8(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf8(v0, v1, v2, v3, base, 
bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4( @@ -37,7 +37,7 @@ void test_vlsseg4e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf4(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf4(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2( @@ -54,7 +54,7 @@ void test_vlsseg4e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1( @@ -71,7 +71,7 @@ void test_vlsseg4e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8m1(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8m1(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2( @@ -88,7 +88,7 @@ void test_vlsseg4e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8m2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8m2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8( @@ -105,7 +105,7 @@ void test_vlsseg4e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_ // CHECK-RV64-NEXT: ret 
void // void test_vlsseg4e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf8(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf8(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4( @@ -122,7 +122,7 @@ void test_vlsseg4e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf4(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf4(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2( @@ -139,7 +139,7 @@ void test_vlsseg4e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1( @@ -156,7 +156,7 @@ void test_vlsseg4e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m1(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8m1(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2( @@ -173,7 +173,7 @@ void test_vlsseg4e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { 
- return vlsseg4e8_v_u8m2(v0, v1, v2, v3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8m2(v0, v1, v2, v3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_m( @@ -190,7 +190,7 @@ void test_vlsseg4e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_m( @@ -207,7 +207,7 @@ void test_vlsseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_m( @@ -224,7 +224,7 @@ void test_vlsseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_m( @@ -241,7 +241,7 @@ void test_vlsseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8m1_m(v0, v1, v2, v3, 
mask, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_m( @@ -258,7 +258,7 @@ void test_vlsseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_m( @@ -275,7 +275,7 @@ void test_vlsseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_m( @@ -292,7 +292,7 @@ void test_vlsseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_m( @@ -309,7 +309,7 @@ void test_vlsseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, base, 
bstride, vl); + return __riscv_vlsseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_m( @@ -326,7 +326,7 @@ void test_vlsseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_m( @@ -343,6 +343,6 @@ void test_vlsseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c index 457130f42a7d..bd149e0a1b25 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2( @@ -42,7 +42,7 @@ void 
test_vlsseg5e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1( @@ -61,7 +61,7 @@ void test_vlsseg5e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4( @@ -80,7 +80,7 @@ void test_vlsseg5e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf4(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16mf4(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2( @@ -99,7 +99,7 @@ void test_vlsseg5e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf2(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16mf2(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1( @@ -118,7 +118,7 @@ void 
test_vlsseg5e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16m1(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16m1(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4( @@ -137,7 +137,7 @@ void test_vlsseg5e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf4(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16mf4(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2( @@ -156,7 +156,7 @@ void test_vlsseg5e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf2(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16mf2(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1( @@ -175,7 +175,7 @@ void test_vlsseg5e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16m1(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16m1(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_m( @@ -194,7 +194,7 @@ void 
test_vlsseg5e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_m( @@ -213,7 +213,7 @@ void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_m( @@ -232,7 +232,7 @@ void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_m( @@ -251,7 +251,7 @@ void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + 
return __riscv_vlsseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_m( @@ -270,7 +270,7 @@ void test_vlsseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_m( @@ -289,7 +289,7 @@ void test_vlsseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_m( @@ -308,7 +308,7 @@ void test_vlsseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_m( @@ -327,7 +327,7 @@ void test_vlsseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t 
mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_m( @@ -346,6 +346,6 @@ void test_vlsseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c index 79637d1676a7..6d95635e9a3a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32mf2(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e32_v_f32mf2(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1( @@ -42,7 +42,7 @@ void test_vlsseg5e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32m1(v0, v1, v2, v3, v4, base, bstride, vl); + return 
__riscv_vlsseg5e32_v_f32m1(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2( @@ -61,7 +61,7 @@ void test_vlsseg5e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32mf2(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e32_v_i32mf2(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1( @@ -80,7 +80,7 @@ void test_vlsseg5e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32m1(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e32_v_i32m1(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2( @@ -99,7 +99,7 @@ void test_vlsseg5e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32mf2(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e32_v_u32mf2(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1( @@ -118,7 +118,7 @@ void test_vlsseg5e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32m1(v0, v1, v2, v3, v4, base, bstride, vl); + return 
__riscv_vlsseg5e32_v_u32m1(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_m( @@ -137,7 +137,7 @@ void test_vlsseg5e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_m( @@ -156,7 +156,7 @@ void test_vlsseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_m( @@ -175,7 +175,7 @@ void test_vlsseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_m( @@ -194,7 +194,7 @@ void test_vlsseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t 
*base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_m( @@ -213,7 +213,7 @@ void test_vlsseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_m( @@ -232,6 +232,6 @@ void test_vlsseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c index 2e1cfd782e48..115569da245b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_f64m1(v0, v1, v2, v3, v4, base, bstride, vl); + 
return __riscv_vlsseg5e64_v_f64m1(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1( @@ -42,7 +42,7 @@ void test_vlsseg5e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_i64m1(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e64_v_i64m1(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1( @@ -61,7 +61,7 @@ void test_vlsseg5e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_u64m1(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e64_v_u64m1(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1_m( @@ -80,7 +80,7 @@ void test_vlsseg5e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_m( @@ -99,7 +99,7 @@ void test_vlsseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, 
base, bstride, vl); + return __riscv_vlsseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_m( @@ -118,6 +118,6 @@ void test_vlsseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c index ee53744a1639..d18a8dd030f5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c @@ -22,7 +22,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf8(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf8(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4( @@ -41,7 +41,7 @@ void test_vlsseg5e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf4(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf4(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2( @@ -60,7 +60,7 @@ void 
test_vlsseg5e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf2(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf2(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1( @@ -79,7 +79,7 @@ void test_vlsseg5e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8m1(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8m1(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8( @@ -98,7 +98,7 @@ void test_vlsseg5e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf8(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf8(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4( @@ -117,7 +117,7 @@ void test_vlsseg5e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf4(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf4(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2( @@ -136,7 +136,7 @@ void test_vlsseg5e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t 
*v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf2(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf2(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1( @@ -155,7 +155,7 @@ void test_vlsseg5e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8m1(v0, v1, v2, v3, v4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8m1(v0, v1, v2, v3, v4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_m( @@ -174,7 +174,7 @@ void test_vlsseg5e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_m( @@ -193,7 +193,7 @@ void test_vlsseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_m( @@ -212,7 +212,7 @@ void test_vlsseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, 
vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_m( @@ -231,7 +231,7 @@ void test_vlsseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_m( @@ -250,7 +250,7 @@ void test_vlsseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_m( @@ -269,7 +269,7 @@ void test_vlsseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_m( @@ 
-288,7 +288,7 @@ void test_vlsseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_m( @@ -307,6 +307,6 @@ void test_vlsseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c index bc766a7a2308..88538bfa757d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2( @@ -46,7 +46,7 @@ void test_vlsseg6e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_ // CHECK-RV64-NEXT: 
ret void // void test_vlsseg6e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1( @@ -67,7 +67,7 @@ void test_vlsseg6e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4( @@ -88,7 +88,7 @@ void test_vlsseg6e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2( @@ -109,7 +109,7 @@ void test_vlsseg6e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1( @@ -130,7 +130,7 @@ 
void test_vlsseg6e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4( @@ -151,7 +151,7 @@ void test_vlsseg6e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2( @@ -172,7 +172,7 @@ void test_vlsseg6e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1( @@ -193,7 +193,7 @@ void test_vlsseg6e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16m1(v0, v1, v2, v3, v4, v5, 
base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_m( @@ -214,7 +214,7 @@ void test_vlsseg6e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_m( @@ -235,7 +235,7 @@ void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_m( @@ -256,7 +256,7 @@ void test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_m( @@ -277,7 +277,7 @@ void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, 
vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_m( @@ -298,7 +298,7 @@ void test_vlsseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_m( @@ -319,7 +319,7 @@ void test_vlsseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_m( @@ -340,7 +340,7 @@ void test_vlsseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlsseg6e16_v_u16mf2_m( @@ -361,7 +361,7 @@ void test_vlsseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_m( @@ -382,6 +382,6 @@ void test_vlsseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c index 8fba07179fe9..253fa579b949 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1( @@ -46,7 
+46,7 @@ void test_vlsseg6e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2( @@ -67,7 +67,7 @@ void test_vlsseg6e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1( @@ -88,7 +88,7 @@ void test_vlsseg6e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e32_v_i32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2( @@ -109,7 +109,7 @@ void test_vlsseg6e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e32_v_u32mf2(v0, v1, v2, v3, v4, 
v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1( @@ -130,7 +130,7 @@ void test_vlsseg6e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e32_v_u32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_m( @@ -151,7 +151,7 @@ void test_vlsseg6e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_m( @@ -172,7 +172,7 @@ void test_vlsseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_m( @@ -193,7 +193,7 @@ void test_vlsseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t 
mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_m( @@ -214,7 +214,7 @@ void test_vlsseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_m( @@ -235,7 +235,7 @@ void test_vlsseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_m( @@ -256,6 +256,6 @@ void test_vlsseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c index c62a814bac0d..b34e926d6d48 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_f64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e64_v_f64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1( @@ -46,7 +46,7 @@ void test_vlsseg6e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_i64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e64_v_i64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1( @@ -67,7 +67,7 @@ void test_vlsseg6e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_u64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e64_v_u64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1_m( @@ -88,7 +88,7 @@ void test_vlsseg6e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // 
CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_m( @@ -109,7 +109,7 @@ void test_vlsseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_m( @@ -130,6 +130,6 @@ void test_vlsseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c index 1103717ea92b..d88a1241bc5f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c @@ -24,7 +24,7 @@ // CHECK-RV64-NEXT: ret void // void 
test_vlsseg6e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4( @@ -45,7 +45,7 @@ void test_vlsseg6e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2( @@ -66,7 +66,7 @@ void test_vlsseg6e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1( @@ -87,7 +87,7 @@ void test_vlsseg6e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8( @@ -108,7 +108,7 @@ void test_vlsseg6e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_ // CHECK-RV64-NEXT: ret 
void // void test_vlsseg6e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4( @@ -129,7 +129,7 @@ void test_vlsseg6e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2( @@ -150,7 +150,7 @@ void test_vlsseg6e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1( @@ -171,7 +171,7 @@ void test_vlsseg6e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_m( @@ -192,7 +192,7 @@ void test_vlsseg6e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t 
*v1, vuint8m1_t *v2, vuint // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_m( @@ -213,7 +213,7 @@ void test_vlsseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_m( @@ -234,7 +234,7 @@ void test_vlsseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_m( @@ -255,7 +255,7 @@ void test_vlsseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8m1_m(v0, v1, v2, 
v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_m( @@ -276,7 +276,7 @@ void test_vlsseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_m( @@ -297,7 +297,7 @@ void test_vlsseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_m( @@ -318,7 +318,7 @@ void test_vlsseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_m( @@ -339,6 +339,6 @@ void test_vlsseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, 
vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c index e4f6fd257219..51bd6b107df4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2( @@ -50,7 +50,7 @@ void test_vlsseg7e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1( @@ -73,7 +73,7 @@ void test_vlsseg7e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, 
const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4( @@ -96,7 +96,7 @@ void test_vlsseg7e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2( @@ -119,7 +119,7 @@ void test_vlsseg7e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1( @@ -142,7 +142,7 @@ void test_vlsseg7e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4( @@ -165,7 +165,7 @@ void test_vlsseg7e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vin // 
CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2( @@ -188,7 +188,7 @@ void test_vlsseg7e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1( @@ -211,7 +211,7 @@ void test_vlsseg7e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_m( @@ -234,7 +234,7 @@ void test_vlsseg7e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, 
vl); + return __riscv_vlsseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_m( @@ -257,7 +257,7 @@ void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_m( @@ -280,7 +280,7 @@ void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_m( @@ -303,7 +303,7 @@ void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_m( @@ -326,7 +326,7 @@ void test_vlsseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t 
*v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_m( @@ -349,7 +349,7 @@ void test_vlsseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_m( @@ -372,7 +372,7 @@ void test_vlsseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_m( @@ -395,7 +395,7 @@ void test_vlsseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, 
ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_m( @@ -418,6 +418,6 @@ void test_vlsseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c index 171877a70665..68a0a6fd1fd4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1( @@ -50,7 +50,7 @@ void test_vlsseg7e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const 
float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2( @@ -73,7 +73,7 @@ void test_vlsseg7e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1( @@ -96,7 +96,7 @@ void test_vlsseg7e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2( @@ -119,7 +119,7 @@ void test_vlsseg7e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1( @@ -142,7 +142,7 @@ void test_vlsseg7e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t * // 
CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2_m( @@ -165,7 +165,7 @@ void test_vlsseg7e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_m( @@ -188,7 +188,7 @@ void test_vlsseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_m( @@ -211,7 +211,7 @@ void test_vlsseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_m( @@ -234,7 +234,7 @@ void test_vlsseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_m( @@ -257,7 +257,7 @@ void test_vlsseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_m( @@ -280,6 +280,6 @@ void test_vlsseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c index 9acd46732a96..46697c060b78 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1( @@ -50,7 +50,7 @@ void test_vlsseg7e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1( @@ -73,7 +73,7 @@ void test_vlsseg7e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1_m( @@ -96,7 +96,7 @@ void 
test_vlsseg7e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_m( @@ -119,7 +119,7 @@ void test_vlsseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_m( @@ -142,6 +142,6 @@ void test_vlsseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c index 67266bca73a5..d3ba2b7fb245 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c 
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c @@ -26,7 +26,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4( @@ -49,7 +49,7 @@ void test_vlsseg7e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2( @@ -72,7 +72,7 @@ void test_vlsseg7e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1( @@ -95,7 +95,7 @@ void test_vlsseg7e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + 
return __riscv_vlsseg7e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8( @@ -118,7 +118,7 @@ void test_vlsseg7e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4( @@ -141,7 +141,7 @@ void test_vlsseg7e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2( @@ -164,7 +164,7 @@ void test_vlsseg7e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1( @@ -187,7 +187,7 @@ void test_vlsseg7e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, 
vuint8m1_t *v6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8_m( @@ -210,7 +210,7 @@ void test_vlsseg7e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_m( @@ -233,7 +233,7 @@ void test_vlsseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_m( @@ -256,7 +256,7 @@ void test_vlsseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_m( @@ -279,7 +279,7 @@ void 
test_vlsseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_m( @@ -302,7 +302,7 @@ void test_vlsseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_m( @@ -325,7 +325,7 @@ void test_vlsseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_m( @@ -348,7 +348,7 @@ void test_vlsseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, 
ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_m( @@ -371,6 +371,6 @@ void test_vlsseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c index 29729655c6f8..c788a3d328a8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2( @@ -54,7 +54,7 @@ void test_vlsseg8e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, 
vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1( @@ -79,7 +79,7 @@ void test_vlsseg8e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4( @@ -104,7 +104,7 @@ void test_vlsseg8e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2( @@ -129,7 +129,7 @@ void test_vlsseg8e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1( @@ -154,7 +154,7 @@ void test_vlsseg8e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4( @@ -179,7 +179,7 @@ void test_vlsseg8e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2( @@ -204,7 +204,7 @@ void test_vlsseg8e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1( @@ -229,7 +229,7 @@ void test_vlsseg8e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, 
vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_m( @@ -254,7 +254,7 @@ void test_vlsseg8e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_m( @@ -279,7 +279,7 @@ void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_m( @@ -304,7 +304,7 @@ void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_m( @@ -329,7 +329,7 @@ void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_m( @@ -354,7 +354,7 @@ void test_vlsseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_m( @@ -379,7 +379,7 @@ void test_vlsseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_m( @@ -404,7 +404,7 @@ void test_vlsseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_m( @@ -429,7 +429,7 @@ void test_vlsseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_m( @@ -454,6 +454,6 @@ void test_vlsseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c index f2aacebb953a..7377dbaa97ce 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1( @@ -54,7 +54,7 @@ void test_vlsseg8e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2( @@ -79,7 +79,7 @@ void test_vlsseg8e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1( @@ -104,7 +104,7 @@ 
void test_vlsseg8e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2( @@ -129,7 +129,7 @@ void test_vlsseg8e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1( @@ -154,7 +154,7 @@ void test_vlsseg8e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_m( @@ -179,7 +179,7 @@ void test_vlsseg8e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, 
vfloat32mf2_t *v7, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_m( @@ -204,7 +204,7 @@ void test_vlsseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_m( @@ -229,7 +229,7 @@ void test_vlsseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_m( @@ -254,7 +254,7 @@ void test_vlsseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return 
__riscv_vlsseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_m( @@ -279,7 +279,7 @@ void test_vlsseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_m( @@ -304,6 +304,6 @@ void test_vlsseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c index c44a621a33c6..353c8afcf996 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, 
ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1( @@ -54,7 +54,7 @@ void test_vlsseg8e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1( @@ -79,7 +79,7 @@ void test_vlsseg8e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1_m( @@ -104,7 +104,7 @@ void test_vlsseg8e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_m( @@ -129,7 
+129,7 @@ void test_vlsseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_m( @@ -154,6 +154,6 @@ void test_vlsseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c index e1d33a252efb..f62d84578b23 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c @@ -28,7 +28,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); 
} // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4( @@ -53,7 +53,7 @@ void test_vlsseg8e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2( @@ -78,7 +78,7 @@ void test_vlsseg8e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1( @@ -103,7 +103,7 @@ void test_vlsseg8e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8( @@ -128,7 +128,7 @@ void test_vlsseg8e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const 
uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4( @@ -153,7 +153,7 @@ void test_vlsseg8e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2( @@ -178,7 +178,7 @@ void test_vlsseg8e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1( @@ -203,7 +203,7 @@ void test_vlsseg8e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8_m( @@ -228,7 +228,7 @@ void 
test_vlsseg8e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_m( @@ -253,7 +253,7 @@ void test_vlsseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_m( @@ -278,7 +278,7 @@ void test_vlsseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_m( @@ -303,7 +303,7 @@ void test_vlsseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, 
vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_m( @@ -328,7 +328,7 @@ void test_vlsseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_m( @@ -353,7 +353,7 @@ void test_vlsseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_m( @@ -378,7 +378,7 @@ void test_vlsseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return 
__riscv_vlsseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_m( @@ -403,6 +403,6 @@ void test_vlsseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei16.c index 2416323b3aff..1cbd4a9bcf3f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f16mf4(base, bindex, vl); + return __riscv_vluxei16_v_f16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f16mf2(base, bindex, vl); + return __riscv_vluxei16_v_f16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, size_t vl) { 
- return vluxei16_v_f16m1(base, bindex, vl); + return __riscv_vluxei16_v_f16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f16m2(base, bindex, vl); + return __riscv_vluxei16_v_f16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f16m4(base, bindex, vl); + return __riscv_vluxei16_v_f16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vluxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vluxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_f16m8(base, bindex, vl); + return __riscv_vluxei16_v_f16m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vluxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f32mf2(base, bindex, vl); + return __riscv_vluxei16_v_f32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f32m1(base, bindex, vl); + return __riscv_vluxei16_v_f32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2( @@ 
-85,7 +85,7 @@ vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f32m2(base, bindex, vl); + return __riscv_vluxei16_v_f32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f32m4(base, bindex, vl); + return __riscv_vluxei16_v_f32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f32m8(base, bindex, vl); + return __riscv_vluxei16_v_f32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f64m1(base, bindex, vl); + return __riscv_vluxei16_v_f64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f64m2(base, bindex, vl); + return __riscv_vluxei16_v_f64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t 
test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f64m4(base, bindex, vl); + return __riscv_vluxei16_v_f64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f64m8(base, bindex, vl); + return __riscv_vluxei16_v_f64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8( @@ -148,7 +148,7 @@ vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i8mf8(base, bindex, vl); + return __riscv_vluxei16_v_i8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4( @@ -157,7 +157,7 @@ vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i8mf4(base, bindex, vl); + return __riscv_vluxei16_v_i8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2( @@ -166,7 +166,7 @@ vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i8mf2(base, bindex, vl); + return __riscv_vluxei16_v_i8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1( @@ -175,7 +175,7 @@ vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i8m1(base, bindex, vl); + return __riscv_vluxei16_v_i8m1(base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2( @@ -184,7 +184,7 @@ vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i8m2(base, bindex, vl); + return __riscv_vluxei16_v_i8m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4( @@ -193,7 +193,7 @@ vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i8m4(base, bindex, vl); + return __riscv_vluxei16_v_i8m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4( @@ -202,7 +202,7 @@ vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i16mf4(base, bindex, vl); + return __riscv_vluxei16_v_i16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2( @@ -211,7 +211,7 @@ vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i16mf2(base, bindex, vl); + return __riscv_vluxei16_v_i16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1( @@ -220,7 +220,7 @@ vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i16m1(base, bindex, vl); + return __riscv_vluxei16_v_i16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2( @@ -229,7 +229,7 @@ vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, 
size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i16m2(base, bindex, vl); + return __riscv_vluxei16_v_i16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4( @@ -238,7 +238,7 @@ vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i16m4(base, bindex, vl); + return __riscv_vluxei16_v_i16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8( @@ -247,7 +247,7 @@ vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i16m8(base, bindex, vl); + return __riscv_vluxei16_v_i16m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2( @@ -256,7 +256,7 @@ vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i32mf2(base, bindex, vl); + return __riscv_vluxei16_v_i32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1( @@ -265,7 +265,7 @@ vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i32m1(base, bindex, vl); + return __riscv_vluxei16_v_i32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2( @@ -274,7 +274,7 @@ vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { - return 
vluxei16_v_i32m2(base, bindex, vl); + return __riscv_vluxei16_v_i32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4( @@ -283,7 +283,7 @@ vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i32m4(base, bindex, vl); + return __riscv_vluxei16_v_i32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8( @@ -292,7 +292,7 @@ vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i32m8(base, bindex, vl); + return __riscv_vluxei16_v_i32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1( @@ -301,7 +301,7 @@ vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i64m1(base, bindex, vl); + return __riscv_vluxei16_v_i64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2( @@ -310,7 +310,7 @@ vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i64m2(base, bindex, vl); + return __riscv_vluxei16_v_i64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4( @@ -319,7 +319,7 @@ vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i64m4(base, bindex, vl); + return __riscv_vluxei16_v_i64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8( @@ -328,7 +328,7 
@@ vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i64m8(base, bindex, vl); + return __riscv_vluxei16_v_i64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8( @@ -337,7 +337,7 @@ vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u8mf8(base, bindex, vl); + return __riscv_vluxei16_v_u8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4( @@ -346,7 +346,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u8mf4(base, bindex, vl); + return __riscv_vluxei16_v_u8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2( @@ -355,7 +355,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u8mf2(base, bindex, vl); + return __riscv_vluxei16_v_u8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1( @@ -364,7 +364,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u8m1(base, bindex, vl); + return __riscv_vluxei16_v_u8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2( @@ -373,7 +373,7 @@ vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u8m2(base, bindex, vl); + return __riscv_vluxei16_v_u8m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4( @@ -382,7 +382,7 @@ vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u8m4(base, bindex, vl); + return __riscv_vluxei16_v_u8m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4( @@ -391,7 +391,7 @@ vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u16mf4(base, bindex, vl); + return __riscv_vluxei16_v_u16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2( @@ -400,7 +400,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u16mf2(base, bindex, vl); + return __riscv_vluxei16_v_u16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1( @@ -409,7 +409,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u16m1(base, bindex, vl); + return __riscv_vluxei16_v_u16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2( @@ -418,7 +418,7 @@ vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u16m2(base, bindex, vl); + return 
__riscv_vluxei16_v_u16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4( @@ -427,7 +427,7 @@ vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u16m4(base, bindex, vl); + return __riscv_vluxei16_v_u16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8( @@ -436,7 +436,7 @@ vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u16m8(base, bindex, vl); + return __riscv_vluxei16_v_u16m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2( @@ -445,7 +445,7 @@ vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u32mf2(base, bindex, vl); + return __riscv_vluxei16_v_u32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1( @@ -454,7 +454,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u32m1(base, bindex, vl); + return __riscv_vluxei16_v_u32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2( @@ -463,7 +463,7 @@ vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u32m2(base, bindex, vl); + return __riscv_vluxei16_v_u32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4( @@ -472,7 +472,7 @@ vuint32m2_t 
test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u32m4(base, bindex, vl); + return __riscv_vluxei16_v_u32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8( @@ -481,7 +481,7 @@ vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u32m8(base, bindex, vl); + return __riscv_vluxei16_v_u32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1( @@ -490,7 +490,7 @@ vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u64m1(base, bindex, vl); + return __riscv_vluxei16_v_u64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2( @@ -499,7 +499,7 @@ vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u64m2(base, bindex, vl); + return __riscv_vluxei16_v_u64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4( @@ -508,7 +508,7 @@ vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u64m4(base, bindex, vl); + return __riscv_vluxei16_v_u64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8( @@ -517,7 +517,7 @@ vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u64m8(base, bindex, vl); + return __riscv_vluxei16_v_u64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_m( @@ -526,7 +526,7 @@ vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f16mf4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_m( @@ -535,7 +535,7 @@ vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f16mf2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_m( @@ -544,7 +544,7 @@ vfloat16mf2_t test_vluxei16_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei16_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f16m1_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_m( @@ -553,7 +553,7 @@ vfloat16m1_t test_vluxei16_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f16m2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_m( @@ -562,7 +562,7 @@ vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint1 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f16m4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_m( @@ -571,7 +571,7 @@ vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_f16m8_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m( @@ -580,7 +580,7 @@ vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f32mf2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m( @@ -589,7 +589,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, const float *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f32m1_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m( @@ -598,7 +598,7 @@ vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, const float *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f32m2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m( @@ -607,7 +607,7 @@ vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, const float 
*base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f32m4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m( @@ -616,7 +616,7 @@ vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, const float *base, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, const float *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f32m8_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_m( @@ -625,7 +625,7 @@ vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, const float *base, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f64m1_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_m( @@ -634,7 +634,7 @@ vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, const double *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f64m2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_m( @@ -643,7 +643,7 @@ vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, const double *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f64m4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_m( @@ -652,7 +652,7 @@ vfloat64m4_t 
test_vluxei16_v_f64m4_m(vbool16_t mask, const double *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, const double *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f64m8_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m( @@ -661,7 +661,7 @@ vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, const double *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i8mf8_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m( @@ -670,7 +670,7 @@ vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i8mf4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m( @@ -679,7 +679,7 @@ vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i8mf2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m( @@ -688,7 +688,7 @@ vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i8m1_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m( @@ -697,7 +697,7 
@@ vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i8m2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m( @@ -706,7 +706,7 @@ vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i8m4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_m( @@ -715,7 +715,7 @@ vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i16mf4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m( @@ -724,7 +724,7 @@ vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i16mf2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m( @@ -733,7 +733,7 @@ vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i16m1_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei16_v_i16m2_m( @@ -742,7 +742,7 @@ vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i16m2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m( @@ -751,7 +751,7 @@ vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i16m4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m( @@ -760,7 +760,7 @@ vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i16m8_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m( @@ -769,7 +769,7 @@ vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i32mf2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m( @@ -778,7 +778,7 @@ vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i32m1_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i32m1_m(mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m( @@ -787,7 +787,7 @@ vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i32m2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_m( @@ -796,7 +796,7 @@ vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i32m4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_m( @@ -805,7 +805,7 @@ vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i32m8_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_m( @@ -814,7 +814,7 @@ vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i64m1_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_m( @@ -823,7 +823,7 @@ vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i64m2_m(mask, base, bindex, vl); + return 
__riscv_vluxei16_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_m( @@ -832,7 +832,7 @@ vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i64m4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_m( @@ -841,7 +841,7 @@ vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i64m8_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_m( @@ -850,7 +850,7 @@ vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u8mf8_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_m( @@ -859,7 +859,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u8mf4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_m( @@ -868,7 +868,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return 
vluxei16_v_u8mf2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_m( @@ -877,7 +877,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u8m1_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_m( @@ -886,7 +886,7 @@ vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u8m2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_m( @@ -895,7 +895,7 @@ vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u8m4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_m( @@ -904,7 +904,7 @@ vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u16mf4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_m( @@ -913,7 +913,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t 
bindex, size_t vl) { - return vluxei16_v_u16mf2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_m( @@ -922,7 +922,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u16m1_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_m( @@ -931,7 +931,7 @@ vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u16m2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_m( @@ -940,7 +940,7 @@ vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u16m4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_m( @@ -949,7 +949,7 @@ vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u16m8_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_m( @@ -958,7 +958,7 @@ vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t 
mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u32mf2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_m( @@ -967,7 +967,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u32m1_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_m( @@ -976,7 +976,7 @@ vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u32m2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_m( @@ -985,7 +985,7 @@ vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u32m4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_m( @@ -994,7 +994,7 @@ vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u32m8_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_m( @@ -1003,7 +1003,7 @@ vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u64m1_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_m( @@ -1012,7 +1012,7 @@ vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u64m2_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_m( @@ -1021,7 +1021,7 @@ vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u64m4_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_m( @@ -1030,6 +1030,6 @@ vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u64m8_m(mask, base, bindex, vl); + return __riscv_vluxei16_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei32.c index 99043abc259b..f66fc16a5657 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei32_v_f16mf4(const _Float16 *base, 
vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f16mf4(base, bindex, vl); + return __riscv_vluxei32_v_f16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f16mf2(base, bindex, vl); + return __riscv_vluxei32_v_f16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f16m1(base, bindex, vl); + return __riscv_vluxei32_v_f16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f16m2(base, bindex, vl); + return __riscv_vluxei32_v_f16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f16m4(base, bindex, vl); + return __riscv_vluxei32_v_f16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2( @@ -58,7 +58,7 @@ vfloat16m4_t test_vluxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f32mf2(base, bindex, vl); + return __riscv_vluxei32_v_f32mf2(base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxei32_v_f32m1( @@ -67,7 +67,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f32m1(base, bindex, vl); + return __riscv_vluxei32_v_f32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2( @@ -76,7 +76,7 @@ vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f32m2(base, bindex, vl); + return __riscv_vluxei32_v_f32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4( @@ -85,7 +85,7 @@ vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f32m4(base, bindex, vl); + return __riscv_vluxei32_v_f32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8( @@ -94,7 +94,7 @@ vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f32m8(base, bindex, vl); + return __riscv_vluxei32_v_f32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1( @@ -103,7 +103,7 @@ vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f64m1(base, bindex, vl); + return __riscv_vluxei32_v_f64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2( @@ -112,7 +112,7 @@ vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f64m2(base, bindex, vl); + return __riscv_vluxei32_v_f64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4( @@ -121,7 +121,7 @@ vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f64m4(base, bindex, vl); + return __riscv_vluxei32_v_f64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8( @@ -130,7 +130,7 @@ vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f64m8(base, bindex, vl); + return __riscv_vluxei32_v_f64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8( @@ -139,7 +139,7 @@ vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i8mf8(base, bindex, vl); + return __riscv_vluxei32_v_i8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4( @@ -148,7 +148,7 @@ vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i8mf4(base, bindex, vl); + return __riscv_vluxei32_v_i8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2( @@ -157,7 +157,7 @@ vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i8mf2(base, 
bindex, vl); + return __riscv_vluxei32_v_i8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1( @@ -166,7 +166,7 @@ vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i8m1(base, bindex, vl); + return __riscv_vluxei32_v_i8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2( @@ -175,7 +175,7 @@ vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i8m2(base, bindex, vl); + return __riscv_vluxei32_v_i8m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4( @@ -184,7 +184,7 @@ vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i16mf4(base, bindex, vl); + return __riscv_vluxei32_v_i16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2( @@ -193,7 +193,7 @@ vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i16mf2(base, bindex, vl); + return __riscv_vluxei32_v_i16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1( @@ -202,7 +202,7 @@ vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i16m1(base, bindex, vl); + return __riscv_vluxei32_v_i16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2( @@ -211,7 +211,7 @@ vint16m1_t 
test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i16m2(base, bindex, vl); + return __riscv_vluxei32_v_i16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4( @@ -220,7 +220,7 @@ vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i16m4(base, bindex, vl); + return __riscv_vluxei32_v_i16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2( @@ -229,7 +229,7 @@ vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i32mf2(base, bindex, vl); + return __riscv_vluxei32_v_i32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1( @@ -238,7 +238,7 @@ vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i32m1(base, bindex, vl); + return __riscv_vluxei32_v_i32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2( @@ -247,7 +247,7 @@ vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i32m2(base, bindex, vl); + return __riscv_vluxei32_v_i32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4( @@ -256,7 +256,7 @@ vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei32_v_i32m4(const 
int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i32m4(base, bindex, vl); + return __riscv_vluxei32_v_i32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8( @@ -265,7 +265,7 @@ vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i32m8(base, bindex, vl); + return __riscv_vluxei32_v_i32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1( @@ -274,7 +274,7 @@ vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i64m1(base, bindex, vl); + return __riscv_vluxei32_v_i64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2( @@ -283,7 +283,7 @@ vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i64m2(base, bindex, vl); + return __riscv_vluxei32_v_i64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4( @@ -292,7 +292,7 @@ vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i64m4(base, bindex, vl); + return __riscv_vluxei32_v_i64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8( @@ -301,7 +301,7 @@ vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i64m8(base, bindex, vl); + return __riscv_vluxei32_v_i64m8(base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8( @@ -310,7 +310,7 @@ vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u8mf8(base, bindex, vl); + return __riscv_vluxei32_v_u8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4( @@ -319,7 +319,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u8mf4(base, bindex, vl); + return __riscv_vluxei32_v_u8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2( @@ -328,7 +328,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u8mf2(base, bindex, vl); + return __riscv_vluxei32_v_u8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1( @@ -337,7 +337,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u8m1(base, bindex, vl); + return __riscv_vluxei32_v_u8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2( @@ -346,7 +346,7 @@ vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u8m2(base, bindex, vl); + return __riscv_vluxei32_v_u8m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4( @@ -355,7 +355,7 @@ vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u16mf4(base, bindex, vl); + return __riscv_vluxei32_v_u16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2( @@ -364,7 +364,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u16mf2(base, bindex, vl); + return __riscv_vluxei32_v_u16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1( @@ -373,7 +373,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u16m1(base, bindex, vl); + return __riscv_vluxei32_v_u16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2( @@ -382,7 +382,7 @@ vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u16m2(base, bindex, vl); + return __riscv_vluxei32_v_u16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4( @@ -391,7 +391,7 @@ vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u16m4(base, bindex, vl); + return __riscv_vluxei32_v_u16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2( @@ -400,7 +400,7 @@ vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return 
vluxei32_v_u32mf2(base, bindex, vl); + return __riscv_vluxei32_v_u32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1( @@ -409,7 +409,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u32m1(base, bindex, vl); + return __riscv_vluxei32_v_u32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2( @@ -418,7 +418,7 @@ vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u32m2(base, bindex, vl); + return __riscv_vluxei32_v_u32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4( @@ -427,7 +427,7 @@ vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u32m4(base, bindex, vl); + return __riscv_vluxei32_v_u32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8( @@ -436,7 +436,7 @@ vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u32m8(base, bindex, vl); + return __riscv_vluxei32_v_u32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1( @@ -445,7 +445,7 @@ vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u64m1(base, bindex, vl); + return __riscv_vluxei32_v_u64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2( @@ 
-454,7 +454,7 @@ vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u64m2(base, bindex, vl); + return __riscv_vluxei32_v_u64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4( @@ -463,7 +463,7 @@ vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u64m4(base, bindex, vl); + return __riscv_vluxei32_v_u64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8( @@ -472,7 +472,7 @@ vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u64m8(base, bindex, vl); + return __riscv_vluxei32_v_u64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_m( @@ -481,7 +481,7 @@ vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f16mf4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_m( @@ -490,7 +490,7 @@ vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei32_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f16mf2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_m( @@ -499,7 +499,7 @@ vfloat16mf2_t 
test_vluxei32_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f16m1_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_m( @@ -508,7 +508,7 @@ vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f16m2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_m( @@ -517,7 +517,7 @@ vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f16m4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m( @@ -526,7 +526,7 @@ vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f32mf2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_m( @@ -535,7 +535,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, const float *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f32m1_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei32_v_f32m2_m( @@ -544,7 +544,7 @@ vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, const float *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f32m2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_m( @@ -553,7 +553,7 @@ vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, const float *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f32m4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_m( @@ -562,7 +562,7 @@ vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, const float *base, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, const float *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f32m8_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_m( @@ -571,7 +571,7 @@ vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, const float *base, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f64m1_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_m( @@ -580,7 +580,7 @@ vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, const double *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f64m2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f64m2_m(mask, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_m( @@ -589,7 +589,7 @@ vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, const double *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f64m4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_m( @@ -598,7 +598,7 @@ vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, const double *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, const double *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f64m8_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m( @@ -607,7 +607,7 @@ vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, const double *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i8mf8_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m( @@ -616,7 +616,7 @@ vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i8mf4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m( @@ -625,7 +625,7 @@ vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i8mf2_m(mask, base, bindex, vl); + return 
__riscv_vluxei32_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m( @@ -634,7 +634,7 @@ vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i8m1_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m( @@ -643,7 +643,7 @@ vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i8m2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m( @@ -652,7 +652,7 @@ vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i16mf4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m( @@ -661,7 +661,7 @@ vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i16mf2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m( @@ -670,7 +670,7 @@ vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i16m1_m(mask, base, 
bindex, vl); + return __riscv_vluxei32_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m( @@ -679,7 +679,7 @@ vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i16m2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m( @@ -688,7 +688,7 @@ vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i16m4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m( @@ -697,7 +697,7 @@ vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i32mf2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m( @@ -706,7 +706,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i32m1_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m( @@ -715,7 +715,7 @@ vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return 
vluxei32_v_i32m2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m( @@ -724,7 +724,7 @@ vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i32m4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m( @@ -733,7 +733,7 @@ vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i32m8_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_m( @@ -742,7 +742,7 @@ vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i64m1_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_m( @@ -751,7 +751,7 @@ vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i64m2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_m( @@ -760,7 +760,7 @@ vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m2_t 
bindex, size_t vl) { - return vluxei32_v_i64m4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_m( @@ -769,7 +769,7 @@ vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i64m8_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_m( @@ -778,7 +778,7 @@ vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u8mf8_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_m( @@ -787,7 +787,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u8mf4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_m( @@ -796,7 +796,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u8mf2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_m( @@ -805,7 +805,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, const 
uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u8m1_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_m( @@ -814,7 +814,7 @@ vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u8m2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_m( @@ -823,7 +823,7 @@ vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u16mf4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_m( @@ -832,7 +832,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u16mf2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_m( @@ -841,7 +841,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u16m1_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_m( @@ -850,7 +850,7 @@ vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vluxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u16m2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_m( @@ -859,7 +859,7 @@ vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u16m4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_m( @@ -868,7 +868,7 @@ vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u32mf2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_m( @@ -877,7 +877,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u32m1_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_m( @@ -886,7 +886,7 @@ vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u32m2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_m( @@ -895,7 +895,7 @@ vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint3 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u32m4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_m( @@ -904,7 +904,7 @@ vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u32m8_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_m( @@ -913,7 +913,7 @@ vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u64m1_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_m( @@ -922,7 +922,7 @@ vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u64m2_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_m( @@ -931,7 +931,7 @@ vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u64m4_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_m( @@ -940,6 +940,6 @@ vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t 
mask, const uint64_t *base, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u64m8_m(mask, base, bindex, vl); + return __riscv_vluxei32_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei64.c index d1f373c9c5a8..eecc8db96e70 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f16mf4(base, bindex, vl); + return __riscv_vluxei64_v_f16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f16mf2(base, bindex, vl); + return __riscv_vluxei64_v_f16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f16m1(base, bindex, vl); + return __riscv_vluxei64_v_f16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return 
vluxei64_v_f16m2(base, bindex, vl); + return __riscv_vluxei64_v_f16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2( @@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f32mf2(base, bindex, vl); + return __riscv_vluxei64_v_f32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1( @@ -58,7 +58,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f32m1(base, bindex, vl); + return __riscv_vluxei64_v_f32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2( @@ -67,7 +67,7 @@ vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f32m2(base, bindex, vl); + return __riscv_vluxei64_v_f32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4( @@ -76,7 +76,7 @@ vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f32m4(base, bindex, vl); + return __riscv_vluxei64_v_f32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1( @@ -85,7 +85,7 @@ vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f64m1(base, bindex, vl); + return __riscv_vluxei64_v_f64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2( @@ -94,7 +94,7 @@ 
vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f64m2(base, bindex, vl); + return __riscv_vluxei64_v_f64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4( @@ -103,7 +103,7 @@ vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f64m4(base, bindex, vl); + return __riscv_vluxei64_v_f64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8( @@ -112,7 +112,7 @@ vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f64m8(base, bindex, vl); + return __riscv_vluxei64_v_f64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8( @@ -121,7 +121,7 @@ vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i8mf8(base, bindex, vl); + return __riscv_vluxei64_v_i8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4( @@ -130,7 +130,7 @@ vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i8mf4(base, bindex, vl); + return __riscv_vluxei64_v_i8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2( @@ -139,7 +139,7 @@ vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei64_v_i8mf2(const 
int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i8mf2(base, bindex, vl); + return __riscv_vluxei64_v_i8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1( @@ -148,7 +148,7 @@ vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i8m1(base, bindex, vl); + return __riscv_vluxei64_v_i8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4( @@ -157,7 +157,7 @@ vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i16mf4(base, bindex, vl); + return __riscv_vluxei64_v_i16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2( @@ -166,7 +166,7 @@ vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i16mf2(base, bindex, vl); + return __riscv_vluxei64_v_i16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1( @@ -175,7 +175,7 @@ vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i16m1(base, bindex, vl); + return __riscv_vluxei64_v_i16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2( @@ -184,7 +184,7 @@ vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i16m2(base, bindex, vl); + return __riscv_vluxei64_v_i16m2(base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2( @@ -193,7 +193,7 @@ vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i32mf2(base, bindex, vl); + return __riscv_vluxei64_v_i32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1( @@ -202,7 +202,7 @@ vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i32m1(base, bindex, vl); + return __riscv_vluxei64_v_i32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2( @@ -211,7 +211,7 @@ vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i32m2(base, bindex, vl); + return __riscv_vluxei64_v_i32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4( @@ -220,7 +220,7 @@ vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i32m4(base, bindex, vl); + return __riscv_vluxei64_v_i32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1( @@ -229,7 +229,7 @@ vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i64m1(base, bindex, vl); + return __riscv_vluxei64_v_i64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2( @@ -238,7 +238,7 @@ vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i64m2(base, bindex, vl); + return __riscv_vluxei64_v_i64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4( @@ -247,7 +247,7 @@ vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i64m4(base, bindex, vl); + return __riscv_vluxei64_v_i64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8( @@ -256,7 +256,7 @@ vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i64m8(base, bindex, vl); + return __riscv_vluxei64_v_i64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8( @@ -265,7 +265,7 @@ vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u8mf8(base, bindex, vl); + return __riscv_vluxei64_v_u8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4( @@ -274,7 +274,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u8mf4(base, bindex, vl); + return __riscv_vluxei64_v_u8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2( @@ -283,7 +283,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u8mf2(base, 
bindex, vl); + return __riscv_vluxei64_v_u8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1( @@ -292,7 +292,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u8m1(base, bindex, vl); + return __riscv_vluxei64_v_u8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4( @@ -301,7 +301,7 @@ vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u16mf4(base, bindex, vl); + return __riscv_vluxei64_v_u16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2( @@ -310,7 +310,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u16mf2(base, bindex, vl); + return __riscv_vluxei64_v_u16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1( @@ -319,7 +319,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u16m1(base, bindex, vl); + return __riscv_vluxei64_v_u16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2( @@ -328,7 +328,7 @@ vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u16m2(base, bindex, vl); + return __riscv_vluxei64_v_u16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2( @@ -337,7 +337,7 @@ 
vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u32mf2(base, bindex, vl); + return __riscv_vluxei64_v_u32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1( @@ -346,7 +346,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u32m1(base, bindex, vl); + return __riscv_vluxei64_v_u32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2( @@ -355,7 +355,7 @@ vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u32m2(base, bindex, vl); + return __riscv_vluxei64_v_u32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4( @@ -364,7 +364,7 @@ vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u32m4(base, bindex, vl); + return __riscv_vluxei64_v_u32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1( @@ -373,7 +373,7 @@ vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u64m1(base, bindex, vl); + return __riscv_vluxei64_v_u64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2( @@ -382,7 +382,7 @@ vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u64m2(base, bindex, vl); + return __riscv_vluxei64_v_u64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4( @@ -391,7 +391,7 @@ vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u64m4(base, bindex, vl); + return __riscv_vluxei64_v_u64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8( @@ -400,7 +400,7 @@ vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u64m8(base, bindex, vl); + return __riscv_vluxei64_v_u64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_m( @@ -409,7 +409,7 @@ vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei64_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f16mf4_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_m( @@ -418,7 +418,7 @@ vfloat16mf4_t test_vluxei64_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f16mf2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_m( @@ -427,7 +427,7 @@ vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t mask, const _Float16 
*base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f16m1_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_f16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_m( @@ -436,7 +436,7 @@ vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f16m2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_m( @@ -445,7 +445,7 @@ vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f32mf2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_m( @@ -454,7 +454,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, const float *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f32m1_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_f32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_m( @@ -463,7 +463,7 @@ vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, const float *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f32m2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_m( @@ -472,7 +472,7 @@ vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, const float *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t 
test_vluxei64_v_f32m4_m(vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f32m4_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_m( @@ -481,7 +481,7 @@ vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, const float *base, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f64m1_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_m( @@ -490,7 +490,7 @@ vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, const double *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f64m2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_f64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_m( @@ -499,7 +499,7 @@ vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, const double *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f64m4_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_m( @@ -508,7 +508,7 @@ vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, const double *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, const double *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f64m8_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m( @@ -517,7 +517,7 @@ vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, const double *base, vuint64m // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i8mf8_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m( @@ -526,7 +526,7 @@ vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i8mf4_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m( @@ -535,7 +535,7 @@ vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i8mf2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m( @@ -544,7 +544,7 @@ vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i8m1_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m( @@ -553,7 +553,7 @@ vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i16mf4_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m( @@ -562,7 +562,7 @@ vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint6 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i16mf2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m( @@ -571,7 +571,7 @@ vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i16m1_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m( @@ -580,7 +580,7 @@ vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i16m2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_m( @@ -589,7 +589,7 @@ vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i32mf2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_m( @@ -598,7 +598,7 @@ vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i32m1_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_m( @@ -607,7 +607,7 @@ vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t 
mask, const int32_t *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i32m2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_m( @@ -616,7 +616,7 @@ vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i32m4_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_m( @@ -625,7 +625,7 @@ vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i64m1_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_m( @@ -634,7 +634,7 @@ vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i64m2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_m( @@ -643,7 +643,7 @@ vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i64m4_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_m( @@ -652,7 +652,7 @@ vint64m4_t 
test_vluxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i64m8_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_m( @@ -661,7 +661,7 @@ vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u8mf8_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_m( @@ -670,7 +670,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u8mf4_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_m( @@ -679,7 +679,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u8mf2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_m( @@ -688,7 +688,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u8m1_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_m( @@ 
-697,7 +697,7 @@ vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u16mf4_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_m( @@ -706,7 +706,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u16mf2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_m( @@ -715,7 +715,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u16m1_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_m( @@ -724,7 +724,7 @@ vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u16m2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_m( @@ -733,7 +733,7 @@ vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u32mf2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u32mf2_m(mask, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_m( @@ -742,7 +742,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u32m1_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_m( @@ -751,7 +751,7 @@ vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u32m2_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_m( @@ -760,7 +760,7 @@ vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u32m4_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_m( @@ -769,7 +769,7 @@ vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u64m1_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_m( @@ -778,7 +778,7 @@ vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u64m2_m(mask, base, bindex, vl); + return 
__riscv_vluxei64_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_m( @@ -787,7 +787,7 @@ vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u64m4_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_m( @@ -796,6 +796,6 @@ vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u64m8_m(mask, base, bindex, vl); + return __riscv_vluxei64_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei8.c index c3450c6b8f12..f4af174715b9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f16mf4(base, bindex, vl); + return __riscv_vluxei8_v_f16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f16mf2(base, bindex, vl); + return __riscv_vluxei8_v_f16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t 
test_vluxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f16m1(base, bindex, vl); + return __riscv_vluxei8_v_f16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f16m2(base, bindex, vl); + return __riscv_vluxei8_v_f16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f16m4(base, bindex, vl); + return __riscv_vluxei8_v_f16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vluxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vluxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_f16m8(base, bindex, vl); + return __riscv_vluxei8_v_f16m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vluxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f32mf2(base, bindex, vl); + return __riscv_vluxei8_v_f32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t 
bindex, size_t vl) { - return vluxei8_v_f32m1(base, bindex, vl); + return __riscv_vluxei8_v_f32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f32m2(base, bindex, vl); + return __riscv_vluxei8_v_f32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f32m4(base, bindex, vl); + return __riscv_vluxei8_v_f32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f32m8(base, bindex, vl); + return __riscv_vluxei8_v_f32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f64m1(base, bindex, vl); + return __riscv_vluxei8_v_f64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f64m2(base, bindex, vl); + return __riscv_vluxei8_v_f64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4( @@ -130,7 +130,7 
@@ vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f64m4(base, bindex, vl); + return __riscv_vluxei8_v_f64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f64m8(base, bindex, vl); + return __riscv_vluxei8_v_f64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8( @@ -148,7 +148,7 @@ vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i8mf8(base, bindex, vl); + return __riscv_vluxei8_v_i8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4( @@ -157,7 +157,7 @@ vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i8mf4(base, bindex, vl); + return __riscv_vluxei8_v_i8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2( @@ -166,7 +166,7 @@ vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i8mf2(base, bindex, vl); + return __riscv_vluxei8_v_i8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1( @@ -175,7 +175,7 @@ vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, 
vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i8m1(base, bindex, vl); + return __riscv_vluxei8_v_i8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2( @@ -184,7 +184,7 @@ vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i8m2(base, bindex, vl); + return __riscv_vluxei8_v_i8m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4( @@ -193,7 +193,7 @@ vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_i8m4(base, bindex, vl); + return __riscv_vluxei8_v_i8m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8( @@ -202,7 +202,7 @@ vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_i8m8(base, bindex, vl); + return __riscv_vluxei8_v_i8m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4( @@ -211,7 +211,7 @@ vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i16mf4(base, bindex, vl); + return __riscv_vluxei8_v_i16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2( @@ -220,7 +220,7 @@ vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i16mf2(base, bindex, vl); + return __riscv_vluxei8_v_i16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1( @@ -229,7 +229,7 
@@ vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i16m1(base, bindex, vl); + return __riscv_vluxei8_v_i16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2( @@ -238,7 +238,7 @@ vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i16m2(base, bindex, vl); + return __riscv_vluxei8_v_i16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4( @@ -247,7 +247,7 @@ vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i16m4(base, bindex, vl); + return __riscv_vluxei8_v_i16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8( @@ -256,7 +256,7 @@ vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_i16m8(base, bindex, vl); + return __riscv_vluxei8_v_i16m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2( @@ -265,7 +265,7 @@ vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i32mf2(base, bindex, vl); + return __riscv_vluxei8_v_i32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1( @@ -274,7 +274,7 @@ vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, 
vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i32m1(base, bindex, vl); + return __riscv_vluxei8_v_i32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2( @@ -283,7 +283,7 @@ vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i32m2(base, bindex, vl); + return __riscv_vluxei8_v_i32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4( @@ -292,7 +292,7 @@ vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i32m4(base, bindex, vl); + return __riscv_vluxei8_v_i32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8( @@ -301,7 +301,7 @@ vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i32m8(base, bindex, vl); + return __riscv_vluxei8_v_i32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1( @@ -310,7 +310,7 @@ vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i64m1(base, bindex, vl); + return __riscv_vluxei8_v_i64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2( @@ -319,7 +319,7 @@ vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i64m2(base, bindex, vl); + return __riscv_vluxei8_v_i64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4( @@ 
-328,7 +328,7 @@ vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i64m4(base, bindex, vl); + return __riscv_vluxei8_v_i64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8( @@ -337,7 +337,7 @@ vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i64m8(base, bindex, vl); + return __riscv_vluxei8_v_i64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8( @@ -346,7 +346,7 @@ vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u8mf8(base, bindex, vl); + return __riscv_vluxei8_v_u8mf8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4( @@ -355,7 +355,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u8mf4(base, bindex, vl); + return __riscv_vluxei8_v_u8mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2( @@ -364,7 +364,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u8mf2(base, bindex, vl); + return __riscv_vluxei8_v_u8mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1( @@ -373,7 +373,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei8_v_u8m1(const 
uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u8m1(base, bindex, vl); + return __riscv_vluxei8_v_u8m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2( @@ -382,7 +382,7 @@ vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u8m2(base, bindex, vl); + return __riscv_vluxei8_v_u8m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4( @@ -391,7 +391,7 @@ vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u8m4(base, bindex, vl); + return __riscv_vluxei8_v_u8m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8( @@ -400,7 +400,7 @@ vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_u8m8(base, bindex, vl); + return __riscv_vluxei8_v_u8m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4( @@ -409,7 +409,7 @@ vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u16mf4(base, bindex, vl); + return __riscv_vluxei8_v_u16mf4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2( @@ -418,7 +418,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u16mf2(base, bindex, vl); + return __riscv_vluxei8_v_u16mf2(base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei8_v_u16m1( @@ -427,7 +427,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u16m1(base, bindex, vl); + return __riscv_vluxei8_v_u16m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2( @@ -436,7 +436,7 @@ vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u16m2(base, bindex, vl); + return __riscv_vluxei8_v_u16m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4( @@ -445,7 +445,7 @@ vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u16m4(base, bindex, vl); + return __riscv_vluxei8_v_u16m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8( @@ -454,7 +454,7 @@ vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u16m8(base, bindex, vl); + return __riscv_vluxei8_v_u16m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2( @@ -463,7 +463,7 @@ vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u32mf2(base, bindex, vl); + return __riscv_vluxei8_v_u32mf2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1( @@ -472,7 +472,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, siz // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u32m1(base, bindex, vl); + return __riscv_vluxei8_v_u32m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2( @@ -481,7 +481,7 @@ vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u32m2(base, bindex, vl); + return __riscv_vluxei8_v_u32m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4( @@ -490,7 +490,7 @@ vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u32m4(base, bindex, vl); + return __riscv_vluxei8_v_u32m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8( @@ -499,7 +499,7 @@ vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u32m8(base, bindex, vl); + return __riscv_vluxei8_v_u32m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1( @@ -508,7 +508,7 @@ vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u64m1(base, bindex, vl); + return __riscv_vluxei8_v_u64m1(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2( @@ -517,7 +517,7 @@ vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u64m2(base, bindex, vl); + return 
__riscv_vluxei8_v_u64m2(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4( @@ -526,7 +526,7 @@ vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u64m4(base, bindex, vl); + return __riscv_vluxei8_v_u64m4(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8( @@ -535,7 +535,7 @@ vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u64m8(base, bindex, vl); + return __riscv_vluxei8_v_u64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_m( @@ -544,7 +544,7 @@ vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f16mf4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_m( @@ -553,7 +553,7 @@ vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t mask, const _Float16 *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f16mf2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_m( @@ -562,7 +562,7 @@ vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t mask, const _Float16 *base, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f16m1_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f16m1_m(mask, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_m( @@ -571,7 +571,7 @@ vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t mask, const _Float16 *base, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f16m2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_m( @@ -580,7 +580,7 @@ vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t mask, const _Float16 *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei8_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f16m4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_m( @@ -589,7 +589,7 @@ vfloat16m4_t test_vluxei8_v_f16m4_m(vbool4_t mask, const _Float16 *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_f16m8_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m( @@ -598,7 +598,7 @@ vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t mask, const _Float16 *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f32mf2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_m( @@ -607,7 +607,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, const float *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f32m1_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f32m1_m(mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m( @@ -616,7 +616,7 @@ vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, const float *base, vuint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f32m2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m( @@ -625,7 +625,7 @@ vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, const float *base, vuint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f32m4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m( @@ -634,7 +634,7 @@ vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, const float *base, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, const float *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f32m8_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_m( @@ -643,7 +643,7 @@ vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, const float *base, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f64m1_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_m( @@ -652,7 +652,7 @@ vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, const double *base, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f64m2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f64m2_m(mask, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_m( @@ -661,7 +661,7 @@ vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, const double *base, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f64m4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_m( @@ -670,7 +670,7 @@ vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, const double *base, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, const double *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f64m8_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_f64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_m( @@ -679,7 +679,7 @@ vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, const double *base, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i8mf8_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_m( @@ -688,7 +688,7 @@ vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i8mf4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_m( @@ -697,7 +697,7 @@ vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i8mf2_m(mask, base, bindex, vl); + return 
__riscv_vluxei8_v_i8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_m( @@ -706,7 +706,7 @@ vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i8m1_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_m( @@ -715,7 +715,7 @@ vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i8m2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_m( @@ -724,7 +724,7 @@ vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_i8m4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_m( @@ -733,7 +733,7 @@ vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_i8m8_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i8m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m( @@ -742,7 +742,7 @@ vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i16mf4_m(mask, base, bindex, vl); + return 
__riscv_vluxei8_v_i16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m( @@ -751,7 +751,7 @@ vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i16mf2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m( @@ -760,7 +760,7 @@ vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i16m1_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m( @@ -769,7 +769,7 @@ vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i16m2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m( @@ -778,7 +778,7 @@ vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i16m4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m( @@ -787,7 +787,7 @@ vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_i16m8_m(mask, base, bindex, vl); + 
return __riscv_vluxei8_v_i16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m( @@ -796,7 +796,7 @@ vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i32mf2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m( @@ -805,7 +805,7 @@ vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i32m1_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m( @@ -814,7 +814,7 @@ vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i32m2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m( @@ -823,7 +823,7 @@ vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i32m4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m( @@ -832,7 +832,7 @@ vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i32m8_m(mask, base, bindex, 
vl); + return __riscv_vluxei8_v_i32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_m( @@ -841,7 +841,7 @@ vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i64m1_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_m( @@ -850,7 +850,7 @@ vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i64m2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_m( @@ -859,7 +859,7 @@ vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i64m4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_m( @@ -868,7 +868,7 @@ vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i64m8_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_i64m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_m( @@ -877,7 +877,7 @@ vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u8mf8_m(mask, base, 
bindex, vl); + return __riscv_vluxei8_v_u8mf8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_m( @@ -886,7 +886,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u8mf4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u8mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_m( @@ -895,7 +895,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u8mf2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u8mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_m( @@ -904,7 +904,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u8m1_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u8m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_m( @@ -913,7 +913,7 @@ vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u8m2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u8m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_m( @@ -922,7 +922,7 @@ vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u8m4_m(mask, base, bindex, 
vl); + return __riscv_vluxei8_v_u8m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_m( @@ -931,7 +931,7 @@ vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_u8m8_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u8m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_m( @@ -940,7 +940,7 @@ vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u16mf4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u16mf4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_m( @@ -949,7 +949,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u16mf2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u16mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_m( @@ -958,7 +958,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u16m1_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u16m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_m( @@ -967,7 +967,7 @@ vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u16m2_m(mask, 
base, bindex, vl); + return __riscv_vluxei8_v_u16m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_m( @@ -976,7 +976,7 @@ vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u16m4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u16m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_m( @@ -985,7 +985,7 @@ vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u16m8_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u16m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_m( @@ -994,7 +994,7 @@ vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u32mf2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u32mf2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_m( @@ -1003,7 +1003,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u32m1_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u32m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_m( @@ -1012,7 +1012,7 @@ vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return 
vluxei8_v_u32m2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u32m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_m( @@ -1021,7 +1021,7 @@ vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u32m4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u32m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_m( @@ -1030,7 +1030,7 @@ vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u32m8_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u32m8_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_m( @@ -1039,7 +1039,7 @@ vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u64m1_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u64m1_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_m( @@ -1048,7 +1048,7 @@ vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u64m2_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u64m2_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_m( @@ -1057,7 +1057,7 @@ vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, 
size_t vl) { - return vluxei8_v_u64m4_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u64m4_m(mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_m( @@ -1066,6 +1066,6 @@ vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u64m8_m(mask, base, bindex, vl); + return __riscv_vluxei8_v_u64m8_m(mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c index 6793a4d2e9ad..8900d052ef70 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei16.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2( @@ -30,7 +30,7 @@ void test_vluxseg2ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1( @@ -43,7 +43,7 @@ void test_vluxseg2ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t 
bindex, size_t vl) { - return vluxseg2ei16_v_f16m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2( @@ -56,7 +56,7 @@ void test_vluxseg2ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4( @@ -69,7 +69,7 @@ void test_vluxseg2ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2( @@ -82,7 +82,7 @@ void test_vluxseg2ei16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f32mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1( @@ -95,7 +95,7 @@ void test_vluxseg2ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const floa // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2( @@ -108,7 +108,7 @@ void test_vluxseg2ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float 
* // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4( @@ -121,7 +121,7 @@ void test_vluxseg2ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1( @@ -134,7 +134,7 @@ void test_vluxseg2ei16_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2( @@ -147,7 +147,7 @@ void test_vluxseg2ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4( @@ -160,7 +160,7 @@ void test_vluxseg2ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m4(v0, v1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8( @@ -173,7 +173,7 @@ void test_vluxseg2ei16_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf8(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4( @@ -186,7 +186,7 @@ void test_vluxseg2ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2( @@ -199,7 +199,7 @@ void test_vluxseg2ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1( @@ -212,7 +212,7 @@ void test_vluxseg2ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2( @@ -225,7 +225,7 @@ void test_vluxseg2ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return 
vluxseg2ei16_v_i8m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4( @@ -238,7 +238,7 @@ void test_vluxseg2ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4( @@ -251,7 +251,7 @@ void test_vluxseg2ei16_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2( @@ -264,7 +264,7 @@ void test_vluxseg2ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1( @@ -277,7 +277,7 @@ void test_vluxseg2ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2( @@ -290,7 +290,7 @@ void test_vluxseg2ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4( @@ -303,7 +303,7 @@ void test_vluxseg2ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2( @@ -316,7 +316,7 @@ void test_vluxseg2ei16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i32mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1( @@ -329,7 +329,7 @@ void test_vluxseg2ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2( @@ -342,7 +342,7 @@ void test_vluxseg2ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4( @@ -355,7 
+355,7 @@ void test_vluxseg2ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1( @@ -368,7 +368,7 @@ void test_vluxseg2ei16_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2( @@ -381,7 +381,7 @@ void test_vluxseg2ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4( @@ -394,7 +394,7 @@ void test_vluxseg2ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8( @@ -407,7 +407,7 @@ void test_vluxseg2ei16_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf8(v0, v1, base, bindex, vl); + 
return __riscv_vluxseg2ei16_v_u8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4( @@ -420,7 +420,7 @@ void test_vluxseg2ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2( @@ -433,7 +433,7 @@ void test_vluxseg2ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1( @@ -446,7 +446,7 @@ void test_vluxseg2ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2( @@ -459,7 +459,7 @@ void test_vluxseg2ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4( @@ -472,7 +472,7 @@ void test_vluxseg2ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, 
const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4( @@ -485,7 +485,7 @@ void test_vluxseg2ei16_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2( @@ -498,7 +498,7 @@ void test_vluxseg2ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1( @@ -511,7 +511,7 @@ void test_vluxseg2ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2( @@ -524,7 +524,7 @@ void test_vluxseg2ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4( @@ -537,7 +537,7 @@ void 
test_vluxseg2ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2( @@ -550,7 +550,7 @@ void test_vluxseg2ei16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u32mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1( @@ -563,7 +563,7 @@ void test_vluxseg2ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2( @@ -576,7 +576,7 @@ void test_vluxseg2ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4( @@ -589,7 +589,7 @@ void test_vluxseg2ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m4(v0, v1, base, bindex, vl); + 
return __riscv_vluxseg2ei16_v_u32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1( @@ -602,7 +602,7 @@ void test_vluxseg2ei16_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2( @@ -615,7 +615,7 @@ void test_vluxseg2ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4( @@ -628,7 +628,7 @@ void test_vluxseg2ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_m( @@ -641,7 +641,7 @@ void test_vluxseg2ei16_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_m( @@ -654,7 +654,7 @@ void test_vluxseg2ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // 
void test_vluxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_m( @@ -667,7 +667,7 @@ void test_vluxseg2ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_m( @@ -680,7 +680,7 @@ void test_vluxseg2ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_m( @@ -693,7 +693,7 @@ void test_vluxseg2ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_m( @@ -706,7 +706,7 @@ void test_vluxseg2ei16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) 
{ - return vluxseg2ei16_v_f32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_m( @@ -719,7 +719,7 @@ void test_vluxseg2ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_m( @@ -732,7 +732,7 @@ void test_vluxseg2ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_m( @@ -745,7 +745,7 @@ void test_vluxseg2ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_m( @@ -758,7 +758,7 @@ void test_vluxseg2ei16_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_m( @@ -771,7 +771,7 @@ void test_vluxseg2ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_m( @@ -784,7 +784,7 @@ void test_vluxseg2ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_m( @@ -797,7 +797,7 @@ void test_vluxseg2ei16_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_m( @@ -810,7 +810,7 @@ void test_vluxseg2ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_m( @@ -823,7 +823,7 @@ void test_vluxseg2ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_m( @@ -836,7 +836,7 @@ void test_vluxseg2ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_m( @@ -849,7 +849,7 @@ void test_vluxseg2ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, con // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_m( @@ -862,7 +862,7 @@ void test_vluxseg2ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, con // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_m( @@ -875,7 +875,7 @@ void test_vluxseg2ei16_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, con // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vluxseg2ei16_v_i16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_m( @@ -888,7 +888,7 @@ void test_vluxseg2ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_m( @@ -901,7 +901,7 @@ void test_vluxseg2ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_m( @@ -914,7 +914,7 @@ void test_vluxseg2ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_m( @@ -927,7 +927,7 @@ void test_vluxseg2ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg2ei16_v_i32mf2_m( @@ -940,7 +940,7 @@ void test_vluxseg2ei16_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_m( @@ -953,7 +953,7 @@ void test_vluxseg2ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_m( @@ -966,7 +966,7 @@ void test_vluxseg2ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_m( @@ -979,7 +979,7 @@ void test_vluxseg2ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_m( @@ -992,7 +992,7 @@ void test_vluxseg2ei16_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret 
void // void test_vluxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_m( @@ -1005,7 +1005,7 @@ void test_vluxseg2ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_m( @@ -1018,7 +1018,7 @@ void test_vluxseg2ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_m( @@ -1031,7 +1031,7 @@ void test_vluxseg2ei16_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_m( @@ -1044,7 +1044,7 @@ void test_vluxseg2ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - 
return vluxseg2ei16_v_u8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_m( @@ -1057,7 +1057,7 @@ void test_vluxseg2ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_m( @@ -1070,7 +1070,7 @@ void test_vluxseg2ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_m( @@ -1083,7 +1083,7 @@ void test_vluxseg2ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, c // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_m( @@ -1096,7 +1096,7 @@ void test_vluxseg2ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, c // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg2ei16_v_u16mf4_m( @@ -1109,7 +1109,7 @@ void test_vluxseg2ei16_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, c // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_m( @@ -1122,7 +1122,7 @@ void test_vluxseg2ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_m( @@ -1135,7 +1135,7 @@ void test_vluxseg2ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_m( @@ -1148,7 +1148,7 @@ void test_vluxseg2ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_m( @@ -1161,7 +1161,7 @@ void test_vluxseg2ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask 
// CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_m( @@ -1174,7 +1174,7 @@ void test_vluxseg2ei16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_m( @@ -1187,7 +1187,7 @@ void test_vluxseg2ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_m( @@ -1200,7 +1200,7 @@ void test_vluxseg2ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_m( @@ -1213,7 +1213,7 @@ void test_vluxseg2ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, 
vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_m( @@ -1226,7 +1226,7 @@ void test_vluxseg2ei16_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_m( @@ -1239,7 +1239,7 @@ void test_vluxseg2ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_m( @@ -1252,6 +1252,6 @@ void test_vluxseg2ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c index 36210094d881..9b7f65f9973c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei32.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2( @@ -30,7 +30,7 @@ void test_vluxseg2ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1( @@ -43,7 +43,7 @@ void test_vluxseg2ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2( @@ -56,7 +56,7 @@ void test_vluxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4( @@ -69,7 +69,7 @@ void test_vluxseg2ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return 
vluxseg2ei32_v_f16m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2( @@ -82,7 +82,7 @@ void test_vluxseg2ei32_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1( @@ -95,7 +95,7 @@ void test_vluxseg2ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const floa // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2( @@ -108,7 +108,7 @@ void test_vluxseg2ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4( @@ -121,7 +121,7 @@ void test_vluxseg2ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1( @@ -134,7 +134,7 @@ void test_vluxseg2ei32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float * // CHECK-RV64-NEXT: ret void // 
void test_vluxseg2ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2( @@ -147,7 +147,7 @@ void test_vluxseg2ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4( @@ -160,7 +160,7 @@ void test_vluxseg2ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8( @@ -173,7 +173,7 @@ void test_vluxseg2ei32_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf8(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4( @@ -186,7 +186,7 @@ void test_vluxseg2ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2( @@ -199,7 
+199,7 @@ void test_vluxseg2ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1( @@ -212,7 +212,7 @@ void test_vluxseg2ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2( @@ -225,7 +225,7 @@ void test_vluxseg2ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4( @@ -238,7 +238,7 @@ void test_vluxseg2ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2( @@ -251,7 +251,7 @@ void test_vluxseg2ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf2(v0, v1, base, bindex, vl); + return 
__riscv_vluxseg2ei32_v_i16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1( @@ -264,7 +264,7 @@ void test_vluxseg2ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2( @@ -277,7 +277,7 @@ void test_vluxseg2ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4( @@ -290,7 +290,7 @@ void test_vluxseg2ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2( @@ -303,7 +303,7 @@ void test_vluxseg2ei32_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1( @@ -316,7 +316,7 @@ void test_vluxseg2ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m1(vint32m1_t *v0, vint32m1_t 
*v1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2( @@ -329,7 +329,7 @@ void test_vluxseg2ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4( @@ -342,7 +342,7 @@ void test_vluxseg2ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1( @@ -355,7 +355,7 @@ void test_vluxseg2ei32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2( @@ -368,7 +368,7 @@ void test_vluxseg2ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4( @@ -381,7 +381,7 @@ void test_vluxseg2ei32_v_i64m2(vint64m2_t *v0, 
vint64m2_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8( @@ -394,7 +394,7 @@ void test_vluxseg2ei32_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf8(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4( @@ -407,7 +407,7 @@ void test_vluxseg2ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2( @@ -420,7 +420,7 @@ void test_vluxseg2ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1( @@ -433,7 +433,7 @@ void test_vluxseg2ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8m1(v0, v1, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2( @@ -446,7 +446,7 @@ void test_vluxseg2ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4( @@ -459,7 +459,7 @@ void test_vluxseg2ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2( @@ -472,7 +472,7 @@ void test_vluxseg2ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1( @@ -485,7 +485,7 @@ void test_vluxseg2ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2( @@ -498,7 +498,7 @@ void test_vluxseg2ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint32m4_t bindex, 
size_t vl) { - return vluxseg2ei32_v_u16m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4( @@ -511,7 +511,7 @@ void test_vluxseg2ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2( @@ -524,7 +524,7 @@ void test_vluxseg2ei32_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1( @@ -537,7 +537,7 @@ void test_vluxseg2ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2( @@ -550,7 +550,7 @@ void test_vluxseg2ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4( @@ -563,7 +563,7 @@ void test_vluxseg2ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1( @@ -576,7 +576,7 @@ void test_vluxseg2ei32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2( @@ -589,7 +589,7 @@ void test_vluxseg2ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4( @@ -602,7 +602,7 @@ void test_vluxseg2ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_m( @@ -615,7 +615,7 @@ void test_vluxseg2ei32_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16mf4_m(v0, v1, mask, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_m( @@ -628,7 +628,7 @@ void test_vluxseg2ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_m( @@ -641,7 +641,7 @@ void test_vluxseg2ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_m( @@ -654,7 +654,7 @@ void test_vluxseg2ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_m( @@ -667,7 +667,7 @@ void test_vluxseg2ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_m( @@ -680,7 +680,7 @@ void test_vluxseg2ei32_v_f16m4_m(vfloat16m4_t 
*v0, vfloat16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_m( @@ -693,7 +693,7 @@ void test_vluxseg2ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_m( @@ -706,7 +706,7 @@ void test_vluxseg2ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_m( @@ -719,7 +719,7 @@ void test_vluxseg2ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_m( @@ -732,7 +732,7 @@ void test_vluxseg2ei32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t 
mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_m( @@ -745,7 +745,7 @@ void test_vluxseg2ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_m( @@ -758,7 +758,7 @@ void test_vluxseg2ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_m( @@ -771,7 +771,7 @@ void test_vluxseg2ei32_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_m( @@ -784,7 +784,7 @@ void test_vluxseg2ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf4_m(v0, v1, mask, base, bindex, vl); + return 
__riscv_vluxseg2ei32_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_m( @@ -797,7 +797,7 @@ void test_vluxseg2ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_m( @@ -810,7 +810,7 @@ void test_vluxseg2ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_m( @@ -823,7 +823,7 @@ void test_vluxseg2ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, con // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_m( @@ -836,7 +836,7 @@ void test_vluxseg2ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, con // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_m( @@ -849,7 +849,7 @@ void 
test_vluxseg2ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_m( @@ -862,7 +862,7 @@ void test_vluxseg2ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_m( @@ -875,7 +875,7 @@ void test_vluxseg2ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_m( @@ -888,7 +888,7 @@ void test_vluxseg2ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_m( @@ -901,7 +901,7 @@ void test_vluxseg2ei32_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, 
vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_m( @@ -914,7 +914,7 @@ void test_vluxseg2ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_m( @@ -927,7 +927,7 @@ void test_vluxseg2ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_m( @@ -940,7 +940,7 @@ void test_vluxseg2ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_m( @@ -953,7 +953,7 @@ void test_vluxseg2ei32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m1_m(v0, v1, mask, base, bindex, vl); + return 
__riscv_vluxseg2ei32_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_m( @@ -966,7 +966,7 @@ void test_vluxseg2ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_m( @@ -979,7 +979,7 @@ void test_vluxseg2ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_m( @@ -992,7 +992,7 @@ void test_vluxseg2ei32_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_m( @@ -1005,7 +1005,7 @@ void test_vluxseg2ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_m( @@ -1018,7 +1018,7 @@ void 
test_vluxseg2ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_m( @@ -1031,7 +1031,7 @@ void test_vluxseg2ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_m( @@ -1044,7 +1044,7 @@ void test_vluxseg2ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, c // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_m( @@ -1057,7 +1057,7 @@ void test_vluxseg2ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, c // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_m( @@ -1070,7 +1070,7 @@ void test_vluxseg2ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_m( @@ -1083,7 +1083,7 @@ void test_vluxseg2ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_m( @@ -1096,7 +1096,7 @@ void test_vluxseg2ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_m( @@ -1109,7 +1109,7 @@ void test_vluxseg2ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_m( @@ -1122,7 +1122,7 @@ void test_vluxseg2ei32_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - 
return vluxseg2ei32_v_u32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_m( @@ -1135,7 +1135,7 @@ void test_vluxseg2ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_m( @@ -1148,7 +1148,7 @@ void test_vluxseg2ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_m( @@ -1161,7 +1161,7 @@ void test_vluxseg2ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_m( @@ -1174,7 +1174,7 @@ void test_vluxseg2ei32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_m( @@ -1187,7 +1187,7 @@ void test_vluxseg2ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_m( @@ -1200,6 +1200,6 @@ void test_vluxseg2ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c index f7192c2cce57..616dcf65dc02 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei64.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2( @@ -30,7 +30,7 @@ void test_vluxseg2ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return 
vluxseg2ei64_v_f16mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1( @@ -43,7 +43,7 @@ void test_vluxseg2ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2( @@ -56,7 +56,7 @@ void test_vluxseg2ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2( @@ -69,7 +69,7 @@ void test_vluxseg2ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f32mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1( @@ -82,7 +82,7 @@ void test_vluxseg2ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const floa // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2( @@ -95,7 +95,7 @@ void test_vluxseg2ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float * // CHECK-RV64-NEXT: ret void 
// void test_vluxseg2ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4( @@ -108,7 +108,7 @@ void test_vluxseg2ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1( @@ -121,7 +121,7 @@ void test_vluxseg2ei64_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2( @@ -134,7 +134,7 @@ void test_vluxseg2ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4( @@ -147,7 +147,7 @@ void test_vluxseg2ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8( @@ 
-160,7 +160,7 @@ void test_vluxseg2ei64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf8(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4( @@ -173,7 +173,7 @@ void test_vluxseg2ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2( @@ -186,7 +186,7 @@ void test_vluxseg2ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1( @@ -199,7 +199,7 @@ void test_vluxseg2ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i8m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4( @@ -212,7 +212,7 @@ void test_vluxseg2ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf4(v0, v1, base, bindex, vl); + return 
__riscv_vluxseg2ei64_v_i16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2( @@ -225,7 +225,7 @@ void test_vluxseg2ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1( @@ -238,7 +238,7 @@ void test_vluxseg2ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2( @@ -251,7 +251,7 @@ void test_vluxseg2ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2( @@ -264,7 +264,7 @@ void test_vluxseg2ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i32mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1( @@ -277,7 +277,7 @@ void test_vluxseg2ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m1(vint32m1_t *v0, vint32m1_t 
*v1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2( @@ -290,7 +290,7 @@ void test_vluxseg2ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4( @@ -303,7 +303,7 @@ void test_vluxseg2ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1( @@ -316,7 +316,7 @@ void test_vluxseg2ei64_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2( @@ -329,7 +329,7 @@ void test_vluxseg2ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4( @@ -342,7 +342,7 @@ void test_vluxseg2ei64_v_i64m2(vint64m2_t *v0, 
vint64m2_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8( @@ -355,7 +355,7 @@ void test_vluxseg2ei64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *ba // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf8(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4( @@ -368,7 +368,7 @@ void test_vluxseg2ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2( @@ -381,7 +381,7 @@ void test_vluxseg2ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1( @@ -394,7 +394,7 @@ void test_vluxseg2ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u8m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8m1(v0, v1, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4( @@ -407,7 +407,7 @@ void test_vluxseg2ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2( @@ -420,7 +420,7 @@ void test_vluxseg2ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1( @@ -433,7 +433,7 @@ void test_vluxseg2ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2( @@ -446,7 +446,7 @@ void test_vluxseg2ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2( @@ -459,7 +459,7 @@ void test_vluxseg2ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, 
vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u32mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1( @@ -472,7 +472,7 @@ void test_vluxseg2ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2( @@ -485,7 +485,7 @@ void test_vluxseg2ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4( @@ -498,7 +498,7 @@ void test_vluxseg2ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1( @@ -511,7 +511,7 @@ void test_vluxseg2ei64_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2( @@ -524,7 +524,7 @@ void test_vluxseg2ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const 
uint64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4( @@ -537,7 +537,7 @@ void test_vluxseg2ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_m( @@ -550,7 +550,7 @@ void test_vluxseg2ei64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_m( @@ -563,7 +563,7 @@ void test_vluxseg2ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_m( @@ -576,7 +576,7 @@ void test_vluxseg2ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return 
vluxseg2ei64_v_f16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_m( @@ -589,7 +589,7 @@ void test_vluxseg2ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_m( @@ -602,7 +602,7 @@ void test_vluxseg2ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_m( @@ -615,7 +615,7 @@ void test_vluxseg2ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_m( @@ -628,7 +628,7 @@ void test_vluxseg2ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_m( @@ -641,7 +641,7 @@ void test_vluxseg2ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_m( @@ -654,7 +654,7 @@ void test_vluxseg2ei64_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_m( @@ -667,7 +667,7 @@ void test_vluxseg2ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_m( @@ -680,7 +680,7 @@ void test_vluxseg2ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_m( @@ -693,7 +693,7 @@ void test_vluxseg2ei64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_m( @@ -706,7 +706,7 @@ void test_vluxseg2ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_m( @@ -719,7 +719,7 @@ void test_vluxseg2ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_m( @@ -732,7 +732,7 @@ void test_vluxseg2ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_m( @@ -745,7 +745,7 @@ void test_vluxseg2ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, con // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - 
return vluxseg2ei64_v_i16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_m( @@ -758,7 +758,7 @@ void test_vluxseg2ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_m( @@ -771,7 +771,7 @@ void test_vluxseg2ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_m( @@ -784,7 +784,7 @@ void test_vluxseg2ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_m( @@ -797,7 +797,7 @@ void test_vluxseg2ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_m( @@ -810,7 +810,7 @@ void test_vluxseg2ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_m( @@ -823,7 +823,7 @@ void test_vluxseg2ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_m( @@ -836,7 +836,7 @@ void test_vluxseg2ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_m( @@ -849,7 +849,7 @@ void test_vluxseg2ei64_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_m( @@ -862,7 +862,7 @@ void test_vluxseg2ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_m( @@ -875,7 +875,7 @@ void test_vluxseg2ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_m( @@ -888,7 +888,7 @@ void test_vluxseg2ei64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_m( @@ -901,7 +901,7 @@ void test_vluxseg2ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_m( @@ -914,7 +914,7 @@ void test_vluxseg2ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t 
vl) { - return vluxseg2ei64_v_u8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_m( @@ -927,7 +927,7 @@ void test_vluxseg2ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_m( @@ -940,7 +940,7 @@ void test_vluxseg2ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, c // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_m( @@ -953,7 +953,7 @@ void test_vluxseg2ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_m( @@ -966,7 +966,7 @@ void test_vluxseg2ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_m( @@ -979,7 +979,7 @@ void test_vluxseg2ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_m( @@ -992,7 +992,7 @@ void test_vluxseg2ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_m( @@ -1005,7 +1005,7 @@ void test_vluxseg2ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_m( @@ -1018,7 +1018,7 @@ void test_vluxseg2ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_m( @@ -1031,7 +1031,7 @@ void test_vluxseg2ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, 
vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_m( @@ -1044,7 +1044,7 @@ void test_vluxseg2ei64_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_m( @@ -1057,7 +1057,7 @@ void test_vluxseg2ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_m( @@ -1070,6 +1070,6 @@ void test_vluxseg2ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c index 1693432cd576..652608317ee8 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg2ei8.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2( @@ -30,7 +30,7 @@ void test_vluxseg2ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Floa // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1( @@ -43,7 +43,7 @@ void test_vluxseg2ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Floa // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2( @@ -56,7 +56,7 @@ void test_vluxseg2ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4( @@ -69,7 +69,7 @@ void test_vluxseg2ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m4(vfloat16m4_t *v0, 
vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2( @@ -82,7 +82,7 @@ void test_vluxseg2ei8_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f32mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1( @@ -95,7 +95,7 @@ void test_vluxseg2ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2( @@ -108,7 +108,7 @@ void test_vluxseg2ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *b // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4( @@ -121,7 +121,7 @@ void test_vluxseg2ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *b // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1( @@ -134,7 +134,7 @@ void test_vluxseg2ei8_v_f32m4(vfloat32m4_t *v0, 
vfloat32m4_t *v1, const float *b // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2( @@ -147,7 +147,7 @@ void test_vluxseg2ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4( @@ -160,7 +160,7 @@ void test_vluxseg2ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8( @@ -173,7 +173,7 @@ void test_vluxseg2ei8_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf8(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4( @@ -186,7 +186,7 @@ void test_vluxseg2ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf4(v0, v1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2( @@ -199,7 +199,7 @@ void test_vluxseg2ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1( @@ -212,7 +212,7 @@ void test_vluxseg2ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2( @@ -225,7 +225,7 @@ void test_vluxseg2ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4( @@ -238,7 +238,7 @@ void test_vluxseg2ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4( @@ -251,7 +251,7 @@ void test_vluxseg2ei8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf4(v0, v1, base, 
bindex, vl); + return __riscv_vluxseg2ei8_v_i16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2( @@ -264,7 +264,7 @@ void test_vluxseg2ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1( @@ -277,7 +277,7 @@ void test_vluxseg2ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2( @@ -290,7 +290,7 @@ void test_vluxseg2ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4( @@ -303,7 +303,7 @@ void test_vluxseg2ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2( @@ -316,7 +316,7 @@ void test_vluxseg2ei8_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32mf2(vint32mf2_t *v0, 
vint32mf2_t *v1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i32mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1( @@ -329,7 +329,7 @@ void test_vluxseg2ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2( @@ -342,7 +342,7 @@ void test_vluxseg2ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4( @@ -355,7 +355,7 @@ void test_vluxseg2ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1( @@ -368,7 +368,7 @@ void test_vluxseg2ei8_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2( @@ -381,7 +381,7 @@ void test_vluxseg2ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, 
const int64_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4( @@ -394,7 +394,7 @@ void test_vluxseg2ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8( @@ -407,7 +407,7 @@ void test_vluxseg2ei8_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *bas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf8(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf8(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4( @@ -420,7 +420,7 @@ void test_vluxseg2ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *b // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2( @@ -433,7 +433,7 @@ void test_vluxseg2ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *b // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg2ei8_v_u8m1( @@ -446,7 +446,7 @@ void test_vluxseg2ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *b // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2( @@ -459,7 +459,7 @@ void test_vluxseg2ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4( @@ -472,7 +472,7 @@ void test_vluxseg2ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4( @@ -485,7 +485,7 @@ void test_vluxseg2ei8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16mf4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2( @@ -498,7 +498,7 @@ void test_vluxseg2ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf2(v0, v1, base, 
bindex, vl); + return __riscv_vluxseg2ei8_v_u16mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1( @@ -511,7 +511,7 @@ void test_vluxseg2ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2( @@ -524,7 +524,7 @@ void test_vluxseg2ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4( @@ -537,7 +537,7 @@ void test_vluxseg2ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2( @@ -550,7 +550,7 @@ void test_vluxseg2ei8_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u32mf2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32mf2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1( @@ -563,7 +563,7 @@ void test_vluxseg2ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m1(vuint32m1_t *v0, 
vuint32m1_t *v1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2( @@ -576,7 +576,7 @@ void test_vluxseg2ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4( @@ -589,7 +589,7 @@ void test_vluxseg2ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1( @@ -602,7 +602,7 @@ void test_vluxseg2ei8_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m1(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m1(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2( @@ -615,7 +615,7 @@ void test_vluxseg2ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m2(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m2(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4( @@ -628,7 +628,7 @@ void test_vluxseg2ei8_v_u64m2(vuint64m2_t *v0, 
vuint64m2_t *v1, const uint64_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m4(v0, v1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m4(v0, v1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_m( @@ -641,7 +641,7 @@ void test_vluxseg2ei8_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_m( @@ -654,7 +654,7 @@ void test_vluxseg2ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_m( @@ -667,7 +667,7 @@ void test_vluxseg2ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_m( @@ -680,7 +680,7 @@ void test_vluxseg2ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, vuint8m1_t 
bindex, size_t vl) { - return vluxseg2ei8_v_f16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_m( @@ -693,7 +693,7 @@ void test_vluxseg2ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_m( @@ -706,7 +706,7 @@ void test_vluxseg2ei8_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_m( @@ -719,7 +719,7 @@ void test_vluxseg2ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_m( @@ -732,7 +732,7 @@ void test_vluxseg2ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m2_m(v0, v1, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_m( @@ -745,7 +745,7 @@ void test_vluxseg2ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_m( @@ -758,7 +758,7 @@ void test_vluxseg2ei8_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_m( @@ -771,7 +771,7 @@ void test_vluxseg2ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_m( @@ -784,7 +784,7 @@ void test_vluxseg2ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_m( @@ -797,7 +797,7 @@ void test_vluxseg2ei8_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: 
ret void // void test_vluxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_m( @@ -810,7 +810,7 @@ void test_vluxseg2ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_m( @@ -823,7 +823,7 @@ void test_vluxseg2ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_m( @@ -836,7 +836,7 @@ void test_vluxseg2ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_m( @@ -849,7 +849,7 @@ void test_vluxseg2ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, cons // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m2_m(v0, v1, mask, base, 
bindex, vl); + return __riscv_vluxseg2ei8_v_i8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_m( @@ -862,7 +862,7 @@ void test_vluxseg2ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, cons // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_m( @@ -875,7 +875,7 @@ void test_vluxseg2ei8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, cons // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_m( @@ -888,7 +888,7 @@ void test_vluxseg2ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_m( @@ -901,7 +901,7 @@ void test_vluxseg2ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_m( @@ -914,7 +914,7 @@ void 
test_vluxseg2ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_m( @@ -927,7 +927,7 @@ void test_vluxseg2ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, c // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_m( @@ -940,7 +940,7 @@ void test_vluxseg2ei8_v_i16m4_m(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, c // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_m( @@ -953,7 +953,7 @@ void test_vluxseg2ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_m( @@ -966,7 +966,7 @@ void test_vluxseg2ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, 
vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_m( @@ -979,7 +979,7 @@ void test_vluxseg2ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_m( @@ -992,7 +992,7 @@ void test_vluxseg2ei8_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, c // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_m( @@ -1005,7 +1005,7 @@ void test_vluxseg2ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_m( @@ -1018,7 +1018,7 @@ void test_vluxseg2ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m4_m(v0, v1, 
mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_m( @@ -1031,7 +1031,7 @@ void test_vluxseg2ei8_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf8_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf8_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_m( @@ -1044,7 +1044,7 @@ void test_vluxseg2ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_m( @@ -1057,7 +1057,7 @@ void test_vluxseg2ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_m( @@ -1070,7 +1070,7 @@ void test_vluxseg2ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_m( @@ -1083,7 +1083,7 @@ void test_vluxseg2ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t 
mask, co // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_m( @@ -1096,7 +1096,7 @@ void test_vluxseg2ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, co // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_m( @@ -1109,7 +1109,7 @@ void test_vluxseg2ei8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, co // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16mf4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_m( @@ -1122,7 +1122,7 @@ void test_vluxseg2ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_m( @@ -1135,7 +1135,7 @@ void test_vluxseg2ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, 
size_t vl) { - return vluxseg2ei8_v_u16m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_m( @@ -1148,7 +1148,7 @@ void test_vluxseg2ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_m( @@ -1161,7 +1161,7 @@ void test_vluxseg2ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_m( @@ -1174,7 +1174,7 @@ void test_vluxseg2ei8_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u32mf2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32mf2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_m( @@ -1187,7 +1187,7 @@ void test_vluxseg2ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m1_m(v0, v1, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_m( @@ -1200,7 +1200,7 @@ void test_vluxseg2ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_m( @@ -1213,7 +1213,7 @@ void test_vluxseg2ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m4_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_m( @@ -1226,7 +1226,7 @@ void test_vluxseg2ei8_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m1_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m1_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_m( @@ -1239,7 +1239,7 @@ void test_vluxseg2ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m2_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m2_m(v0, v1, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_m( @@ -1252,6 +1252,6 @@ void test_vluxseg2ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m4_m(v0, v1, mask, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m4_m(v0, v1, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c index 416943ece0ce..aab5c8e400d5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei16.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2( @@ -34,7 +34,7 @@ void test_vluxseg3ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1( @@ -49,7 +49,7 @@ void test_vluxseg3ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16m1(v0, v1, v2, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2( @@ -64,7 +64,7 @@ void test_vluxseg3ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2( @@ -79,7 +79,7 @@ void test_vluxseg3ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1( @@ -94,7 +94,7 @@ void test_vluxseg3ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2( @@ -109,7 +109,7 @@ void test_vluxseg3ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1( @@ -124,7 +124,7 @@ void test_vluxseg3ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2( @@ -139,7 +139,7 @@ void test_vluxseg3ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8( @@ -154,7 +154,7 @@ void test_vluxseg3ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4( @@ -169,7 +169,7 @@ void test_vluxseg3ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2( @@ -184,7 +184,7 @@ void test_vluxseg3ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf2(v0, v1, v2, base, bindex, vl); + return 
__riscv_vluxseg3ei16_v_i8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1( @@ -199,7 +199,7 @@ void test_vluxseg3ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2( @@ -214,7 +214,7 @@ void test_vluxseg3ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4( @@ -229,7 +229,7 @@ void test_vluxseg3ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2( @@ -244,7 +244,7 @@ void test_vluxseg3ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1( @@ -259,7 +259,7 @@ void test_vluxseg3ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v 
// CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2( @@ -274,7 +274,7 @@ void test_vluxseg3ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2( @@ -289,7 +289,7 @@ void test_vluxseg3ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1( @@ -304,7 +304,7 @@ void test_vluxseg3ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2( @@ -319,7 +319,7 @@ void test_vluxseg3ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m2(v0, v1, 
v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1( @@ -334,7 +334,7 @@ void test_vluxseg3ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2( @@ -349,7 +349,7 @@ void test_vluxseg3ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8( @@ -364,7 +364,7 @@ void test_vluxseg3ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4( @@ -379,7 +379,7 @@ void test_vluxseg3ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2( @@ -394,7 +394,7 @@ void 
test_vluxseg3ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1( @@ -409,7 +409,7 @@ void test_vluxseg3ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2( @@ -424,7 +424,7 @@ void test_vluxseg3ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4( @@ -439,7 +439,7 @@ void test_vluxseg3ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2( @@ -454,7 +454,7 @@ void test_vluxseg3ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const 
uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1( @@ -469,7 +469,7 @@ void test_vluxseg3ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2( @@ -484,7 +484,7 @@ void test_vluxseg3ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2( @@ -499,7 +499,7 @@ void test_vluxseg3ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1( @@ -514,7 +514,7 @@ void test_vluxseg3ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32m1(v0, v1, v2, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2( @@ -529,7 +529,7 @@ void test_vluxseg3ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1( @@ -544,7 +544,7 @@ void test_vluxseg3ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2( @@ -559,7 +559,7 @@ void test_vluxseg3ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_m( @@ -574,7 +574,7 @@ void test_vluxseg3ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_m( @@ -589,7 +589,7 @@ void test_vluxseg3ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, 
vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_m( @@ -604,7 +604,7 @@ void test_vluxseg3ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_m( @@ -619,7 +619,7 @@ void test_vluxseg3ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m( @@ -634,7 +634,7 @@ void test_vluxseg3ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_m( @@ -649,7 +649,7 @@ void test_vluxseg3ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: 
ret void // void test_vluxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_m( @@ -664,7 +664,7 @@ void test_vluxseg3ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_m( @@ -679,7 +679,7 @@ void test_vluxseg3ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_m( @@ -694,7 +694,7 @@ void test_vluxseg3ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_m( @@ -709,7 +709,7 @@ void test_vluxseg3ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_m( @@ -724,7 +724,7 @@ void test_vluxseg3ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_m( @@ -739,7 +739,7 @@ void test_vluxseg3ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_m( @@ -754,7 +754,7 @@ void test_vluxseg3ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_m( @@ -769,7 +769,7 @@ void test_vluxseg3ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, 
vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_m( @@ -784,7 +784,7 @@ void test_vluxseg3ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_m( @@ -799,7 +799,7 @@ void test_vluxseg3ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_m( @@ -814,7 +814,7 @@ void test_vluxseg3ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_m( @@ -829,7 +829,7 @@ void test_vluxseg3ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, 
size_t vl) { - return vluxseg3ei16_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_m( @@ -844,7 +844,7 @@ void test_vluxseg3ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_m( @@ -859,7 +859,7 @@ void test_vluxseg3ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_m( @@ -874,7 +874,7 @@ void test_vluxseg3ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_m( @@ -889,7 +889,7 @@ void test_vluxseg3ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m1_m(v0, v1, v2, 
mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_m( @@ -904,7 +904,7 @@ void test_vluxseg3ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_m( @@ -919,7 +919,7 @@ void test_vluxseg3ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_m( @@ -934,7 +934,7 @@ void test_vluxseg3ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vluxseg3ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return 
__riscv_vluxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_m( @@ -964,7 +964,7 @@ void test_vluxseg3ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_m( @@ -979,7 +979,7 @@ void test_vluxseg3ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_m( @@ -994,7 +994,7 @@ void test_vluxseg3ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_m( @@ -1009,7 +1009,7 @@ void test_vluxseg3ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_m( @@ -1024,7 +1024,7 @@ void test_vluxseg3ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_m( @@ -1039,7 +1039,7 @@ void test_vluxseg3ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void test_vluxseg3ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_m( @@ -1069,7 +1069,7 @@ void test_vluxseg3ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_m( @@ -1084,7 +1084,7 @@ void test_vluxseg3ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_m( @@ -1099,7 +1099,7 @@ void test_vluxseg3ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_m( @@ -1114,6 +1114,6 @@ void test_vluxseg3ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c index 60f7908b17eb..87d65ed0cce3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei32.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2( @@ -34,7 +34,7 @@ void test_vluxseg3ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1( @@ -49,7 +49,7 @@ void test_vluxseg3ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2( @@ -64,7 +64,7 @@ void test_vluxseg3ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2( @@ -79,7 +79,7 @@ void test_vluxseg3ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32mf2(v0, v1, 
v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1( @@ -94,7 +94,7 @@ void test_vluxseg3ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2( @@ -109,7 +109,7 @@ void test_vluxseg3ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1( @@ -124,7 +124,7 @@ void test_vluxseg3ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2( @@ -139,7 +139,7 @@ void test_vluxseg3ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8( @@ -154,7 +154,7 @@ void 
test_vluxseg3ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4( @@ -169,7 +169,7 @@ void test_vluxseg3ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2( @@ -184,7 +184,7 @@ void test_vluxseg3ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1( @@ -199,7 +199,7 @@ void test_vluxseg3ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2( @@ -214,7 +214,7 @@ void test_vluxseg3ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint32m8_t bindex, 
size_t vl) { - return vluxseg3ei32_v_i8m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4( @@ -229,7 +229,7 @@ void test_vluxseg3ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2( @@ -244,7 +244,7 @@ void test_vluxseg3ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1( @@ -259,7 +259,7 @@ void test_vluxseg3ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2( @@ -274,7 +274,7 @@ void test_vluxseg3ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2( @@ 
-289,7 +289,7 @@ void test_vluxseg3ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1( @@ -304,7 +304,7 @@ void test_vluxseg3ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2( @@ -319,7 +319,7 @@ void test_vluxseg3ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1( @@ -334,7 +334,7 @@ void test_vluxseg3ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2( @@ -349,7 +349,7 @@ void test_vluxseg3ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t 
*v2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8( @@ -364,7 +364,7 @@ void test_vluxseg3ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4( @@ -379,7 +379,7 @@ void test_vluxseg3ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2( @@ -394,7 +394,7 @@ void test_vluxseg3ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1( @@ -409,7 +409,7 @@ void test_vluxseg3ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8m1(v0, v1, v2, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2( @@ -424,7 +424,7 @@ void test_vluxseg3ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4( @@ -439,7 +439,7 @@ void test_vluxseg3ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2( @@ -454,7 +454,7 @@ void test_vluxseg3ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1( @@ -469,7 +469,7 @@ void test_vluxseg3ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2( @@ -484,7 +484,7 @@ void test_vluxseg3ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2( @@ -499,7 +499,7 @@ void test_vluxseg3ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1( @@ -514,7 +514,7 @@ void test_vluxseg3ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2( @@ -529,7 +529,7 @@ void test_vluxseg3ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1( @@ -544,7 +544,7 @@ void test_vluxseg3ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m1(v0, v1, v2, base, 
bindex, vl); + return __riscv_vluxseg3ei32_v_u64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2( @@ -559,7 +559,7 @@ void test_vluxseg3ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_m( @@ -574,7 +574,7 @@ void test_vluxseg3ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_m( @@ -589,7 +589,7 @@ void test_vluxseg3ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_m( @@ -604,7 +604,7 @@ void test_vluxseg3ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16m1_m(v0, v1, v2, mask, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_m( @@ -619,7 +619,7 @@ void test_vluxseg3ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_m( @@ -634,7 +634,7 @@ void test_vluxseg3ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_m( @@ -649,7 +649,7 @@ void test_vluxseg3ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_m( @@ -664,7 +664,7 @@ void test_vluxseg3ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg3ei32_v_f64m1_m( @@ -679,7 +679,7 @@ void test_vluxseg3ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_m( @@ -694,7 +694,7 @@ void test_vluxseg3ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_m( @@ -709,7 +709,7 @@ void test_vluxseg3ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_m( @@ -724,7 +724,7 @@ void test_vluxseg3ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_m( @@ -739,7 +739,7 @@ void 
test_vluxseg3ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_m( @@ -754,7 +754,7 @@ void test_vluxseg3ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_m( @@ -769,7 +769,7 @@ void test_vluxseg3ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_m( @@ -784,7 +784,7 @@ void test_vluxseg3ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_m( @@ -799,7 +799,7 @@ void test_vluxseg3ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_m( @@ -814,7 +814,7 @@ void test_vluxseg3ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_m( @@ -829,7 +829,7 @@ void test_vluxseg3ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_m( @@ -844,7 +844,7 @@ void test_vluxseg3ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_m( @@ -859,7 +859,7 @@ void test_vluxseg3ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_m( @@ -874,7 +874,7 @@ void test_vluxseg3ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_m( @@ -889,7 +889,7 @@ void test_vluxseg3ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_m( @@ -904,7 +904,7 @@ void test_vluxseg3ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_m( @@ -919,7 +919,7 @@ void test_vluxseg3ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, 
vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_m( @@ -934,7 +934,7 @@ void test_vluxseg3ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vluxseg3ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_m( @@ -964,7 +964,7 @@ void test_vluxseg3ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_m( @@ -979,7 +979,7 @@ void test_vluxseg3ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint32m8_t 
bindex, size_t vl) { - return vluxseg3ei32_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_m( @@ -994,7 +994,7 @@ void test_vluxseg3ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_m( @@ -1009,7 +1009,7 @@ void test_vluxseg3ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_m( @@ -1024,7 +1024,7 @@ void test_vluxseg3ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_m( @@ -1039,7 +1039,7 @@ void test_vluxseg3ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return 
vluxseg3ei32_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void test_vluxseg3ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_m( @@ -1069,7 +1069,7 @@ void test_vluxseg3ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_m( @@ -1084,7 +1084,7 @@ void test_vluxseg3ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_m( @@ -1099,7 +1099,7 @@ void test_vluxseg3ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m1_m(v0, v1, v2, 
mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_m( @@ -1114,6 +1114,6 @@ void test_vluxseg3ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c index 5e29143a0510..62d810766742 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei64.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2( @@ -34,7 +34,7 @@ void test_vluxseg3ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1( @@ -49,7 +49,7 @@ void test_vluxseg3ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret 
void // void test_vluxseg3ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2( @@ -64,7 +64,7 @@ void test_vluxseg3ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2( @@ -79,7 +79,7 @@ void test_vluxseg3ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1( @@ -94,7 +94,7 @@ void test_vluxseg3ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2( @@ -109,7 +109,7 @@ void test_vluxseg3ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m2(v0, v1, v2, 
base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1( @@ -124,7 +124,7 @@ void test_vluxseg3ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2( @@ -139,7 +139,7 @@ void test_vluxseg3ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8( @@ -154,7 +154,7 @@ void test_vluxseg3ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4( @@ -169,7 +169,7 @@ void test_vluxseg3ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2( @@ -184,7 +184,7 @@ void test_vluxseg3ei64_v_i8mf4(vint8mf4_t *v0, 
vint8mf4_t *v1, vint8mf4_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1( @@ -199,7 +199,7 @@ void test_vluxseg3ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4( @@ -214,7 +214,7 @@ void test_vluxseg3ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2( @@ -229,7 +229,7 @@ void test_vluxseg3ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1( @@ -244,7 +244,7 @@ void test_vluxseg3ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - 
return vluxseg3ei64_v_i16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2( @@ -259,7 +259,7 @@ void test_vluxseg3ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2( @@ -274,7 +274,7 @@ void test_vluxseg3ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1( @@ -289,7 +289,7 @@ void test_vluxseg3ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2( @@ -304,7 +304,7 @@ void test_vluxseg3ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1( @@ -319,7 +319,7 @@ void 
test_vluxseg3ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2( @@ -334,7 +334,7 @@ void test_vluxseg3ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8( @@ -349,7 +349,7 @@ void test_vluxseg3ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, c // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4( @@ -364,7 +364,7 @@ void test_vluxseg3ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2( @@ -379,7 +379,7 @@ void test_vluxseg3ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t 
*base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1( @@ -394,7 +394,7 @@ void test_vluxseg3ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4( @@ -409,7 +409,7 @@ void test_vluxseg3ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2( @@ -424,7 +424,7 @@ void test_vluxseg3ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1( @@ -439,7 +439,7 @@ void test_vluxseg3ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16m1(v0, v1, v2, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2( @@ -454,7 +454,7 @@ void test_vluxseg3ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2( @@ -469,7 +469,7 @@ void test_vluxseg3ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1( @@ -484,7 +484,7 @@ void test_vluxseg3ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2( @@ -499,7 +499,7 @@ void test_vluxseg3ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1( @@ -514,7 +514,7 @@ void test_vluxseg3ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2( @@ -529,7 +529,7 @@ void test_vluxseg3ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_m( @@ -544,7 +544,7 @@ void test_vluxseg3ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_m( @@ -559,7 +559,7 @@ void test_vluxseg3ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_m( @@ -574,7 +574,7 @@ void test_vluxseg3ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, 
const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_m( @@ -589,7 +589,7 @@ void test_vluxseg3ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_m( @@ -604,7 +604,7 @@ void test_vluxseg3ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_m( @@ -619,7 +619,7 @@ void test_vluxseg3ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_m( @@ -634,7 +634,7 @@ void test_vluxseg3ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, vuint64m4_t bindex, 
size_t vl) { - return vluxseg3ei64_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_m( @@ -649,7 +649,7 @@ void test_vluxseg3ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_m( @@ -664,7 +664,7 @@ void test_vluxseg3ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_m( @@ -679,7 +679,7 @@ void test_vluxseg3ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_m( @@ -694,7 +694,7 @@ void test_vluxseg3ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf4_m(v0, v1, v2, 
mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_m( @@ -709,7 +709,7 @@ void test_vluxseg3ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_m( @@ -724,7 +724,7 @@ void test_vluxseg3ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_m( @@ -739,7 +739,7 @@ void test_vluxseg3ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_m( @@ -754,7 +754,7 @@ void test_vluxseg3ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return 
__riscv_vluxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_m( @@ -769,7 +769,7 @@ void test_vluxseg3ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_m( @@ -784,7 +784,7 @@ void test_vluxseg3ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_m( @@ -799,7 +799,7 @@ void test_vluxseg3ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_m( @@ -814,7 +814,7 @@ void test_vluxseg3ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32m1_m(v0, v1, v2, mask, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_m( @@ -829,7 +829,7 @@ void test_vluxseg3ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_m( @@ -844,7 +844,7 @@ void test_vluxseg3ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_m( @@ -859,7 +859,7 @@ void test_vluxseg3ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_m( @@ -874,7 +874,7 @@ void test_vluxseg3ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_m( @@ 
-889,7 +889,7 @@ void test_vluxseg3ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_m( @@ -904,7 +904,7 @@ void test_vluxseg3ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_m( @@ -919,7 +919,7 @@ void test_vluxseg3ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_m( @@ -934,7 +934,7 @@ void test_vluxseg3ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m( @@ -949,7 +949,7 @@ void 
test_vluxseg3ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_m( @@ -964,7 +964,7 @@ void test_vluxseg3ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_m( @@ -979,7 +979,7 @@ void test_vluxseg3ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_m( @@ -994,7 +994,7 @@ void test_vluxseg3ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_m( @@ -1009,7 +1009,7 @@ void 
test_vluxseg3ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_m( @@ -1024,7 +1024,7 @@ void test_vluxseg3ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_m( @@ -1039,7 +1039,7 @@ void test_vluxseg3ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_m( @@ -1054,6 +1054,6 @@ void test_vluxseg3ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c index dd9c1fb8ea12..9aa3a189b497 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg3ei8.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2( @@ -34,7 +34,7 @@ void test_vluxseg3ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1( @@ -49,7 +49,7 @@ void test_vluxseg3ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2( @@ -64,7 +64,7 @@ void test_vluxseg3ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16m2(v0, v1, v2, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2( @@ -79,7 +79,7 @@ void test_vluxseg3ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1( @@ -94,7 +94,7 @@ void test_vluxseg3ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2( @@ -109,7 +109,7 @@ void test_vluxseg3ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1( @@ -124,7 +124,7 @@ void test_vluxseg3ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2( @@ -139,7 +139,7 @@ void test_vluxseg3ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8( @@ -154,7 +154,7 @@ void test_vluxseg3ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4( @@ -169,7 +169,7 @@ void test_vluxseg3ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2( @@ -184,7 +184,7 @@ void test_vluxseg3ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1( @@ -199,7 +199,7 @@ void test_vluxseg3ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8m1(v0, v1, 
v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2( @@ -214,7 +214,7 @@ void test_vluxseg3ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4( @@ -229,7 +229,7 @@ void test_vluxseg3ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2( @@ -244,7 +244,7 @@ void test_vluxseg3ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1( @@ -259,7 +259,7 @@ void test_vluxseg3ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2( @@ -274,7 +274,7 @@ void test_vluxseg3ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, co // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2( @@ -289,7 +289,7 @@ void test_vluxseg3ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1( @@ -304,7 +304,7 @@ void test_vluxseg3ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2( @@ -319,7 +319,7 @@ void test_vluxseg3ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1( @@ -334,7 +334,7 @@ void test_vluxseg3ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m1(v0, v1, v2, base, bindex, vl); + return 
__riscv_vluxseg3ei8_v_i64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2( @@ -349,7 +349,7 @@ void test_vluxseg3ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8( @@ -364,7 +364,7 @@ void test_vluxseg3ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, co // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf8(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf8(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4( @@ -379,7 +379,7 @@ void test_vluxseg3ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2( @@ -394,7 +394,7 @@ void test_vluxseg3ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1( @@ -409,7 +409,7 @@ void test_vluxseg3ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // 
CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2( @@ -424,7 +424,7 @@ void test_vluxseg3ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, con // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4( @@ -439,7 +439,7 @@ void test_vluxseg3ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, con // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf4(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16mf4(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2( @@ -454,7 +454,7 @@ void test_vluxseg3ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1( @@ -469,7 +469,7 @@ void test_vluxseg3ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m1(v0, v1, v2, base, 
bindex, vl); + return __riscv_vluxseg3ei8_v_u16m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2( @@ -484,7 +484,7 @@ void test_vluxseg3ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2( @@ -499,7 +499,7 @@ void test_vluxseg3ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u32mf2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32mf2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1( @@ -514,7 +514,7 @@ void test_vluxseg3ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2( @@ -529,7 +529,7 @@ void test_vluxseg3ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1( @@ -544,7 +544,7 @@ void test_vluxseg3ei8_v_u32m2(vuint32m2_t *v0, 
vuint32m2_t *v1, vuint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m1(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u64m1(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2( @@ -559,7 +559,7 @@ void test_vluxseg3ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m2(v0, v1, v2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u64m2(v0, v1, v2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_m( @@ -574,7 +574,7 @@ void test_vluxseg3ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_m( @@ -589,7 +589,7 @@ void test_vluxseg3ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_m( @@ -604,7 +604,7 @@ void test_vluxseg3ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_m( @@ -619,7 +619,7 @@ void test_vluxseg3ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_m( @@ -634,7 +634,7 @@ void test_vluxseg3ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_m( @@ -649,7 +649,7 @@ void test_vluxseg3ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_m( @@ -664,7 +664,7 @@ void test_vluxseg3ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, 
const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_m( @@ -679,7 +679,7 @@ void test_vluxseg3ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_m( @@ -694,7 +694,7 @@ void test_vluxseg3ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_m( @@ -709,7 +709,7 @@ void test_vluxseg3ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_m( @@ -724,7 +724,7 @@ void test_vluxseg3ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return 
vluxseg3ei8_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_m( @@ -739,7 +739,7 @@ void test_vluxseg3ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_m( @@ -754,7 +754,7 @@ void test_vluxseg3ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_m( @@ -769,7 +769,7 @@ void test_vluxseg3ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vboo // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_m( @@ -784,7 +784,7 @@ void test_vluxseg3ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vboo // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return 
__riscv_vluxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_m( @@ -799,7 +799,7 @@ void test_vluxseg3ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_m( @@ -814,7 +814,7 @@ void test_vluxseg3ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_m( @@ -829,7 +829,7 @@ void test_vluxseg3ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_m( @@ -844,7 +844,7 @@ void test_vluxseg3ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_m( @@ -859,7 +859,7 @@ void test_vluxseg3ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_m( @@ -874,7 +874,7 @@ void test_vluxseg3ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_m( @@ -889,7 +889,7 @@ void test_vluxseg3ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_m( @@ -904,7 +904,7 @@ void test_vluxseg3ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i64m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_m( @@ -919,7 +919,7 @@ void 
test_vluxseg3ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_m( @@ -934,7 +934,7 @@ void test_vluxseg3ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vluxseg3ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_m( @@ -964,7 +964,7 @@ void test_vluxseg3ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_m( @@ -979,7 +979,7 @@ void test_vluxseg3ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // 
CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_m( @@ -994,7 +994,7 @@ void test_vluxseg3ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_m( @@ -1009,7 +1009,7 @@ void test_vluxseg3ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_m( @@ -1024,7 +1024,7 @@ void test_vluxseg3ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_m( @@ -1039,7 +1039,7 @@ void test_vluxseg3ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void test_vluxseg3ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_m( @@ -1069,7 +1069,7 @@ void test_vluxseg3ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_m( @@ -1084,7 +1084,7 @@ void test_vluxseg3ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32m2_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_m( @@ -1099,7 +1099,7 @@ void test_vluxseg3ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u64m1_m(vuint64m1_t *v0, 
vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u64m1_m(v0, v1, v2, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_m( @@ -1114,6 +1114,6 @@ void test_vluxseg3ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u64m2_m(v0, v1, v2, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c index 2e71ef086bfd..478c8080a3fc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei16.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2( @@ -38,7 +38,7 @@ void test_vluxseg4ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16mf2(v0, v1, 
v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1( @@ -55,7 +55,7 @@ void test_vluxseg4ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2( @@ -72,7 +72,7 @@ void test_vluxseg4ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2( @@ -89,7 +89,7 @@ void test_vluxseg4ei16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1( @@ -106,7 +106,7 @@ void test_vluxseg4ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2( @@ 
-123,7 +123,7 @@ void test_vluxseg4ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1( @@ -140,7 +140,7 @@ void test_vluxseg4ei16_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2( @@ -157,7 +157,7 @@ void test_vluxseg4ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8( @@ -174,7 +174,7 @@ void test_vluxseg4ei16_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4( @@ -191,7 +191,7 @@ void test_vluxseg4ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v 
// CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2( @@ -208,7 +208,7 @@ void test_vluxseg4ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1( @@ -225,7 +225,7 @@ void test_vluxseg4ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2( @@ -242,7 +242,7 @@ void test_vluxseg4ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4( @@ -259,7 +259,7 @@ void test_vluxseg4ei16_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, 
const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2( @@ -276,7 +276,7 @@ void test_vluxseg4ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1( @@ -293,7 +293,7 @@ void test_vluxseg4ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2( @@ -310,7 +310,7 @@ void test_vluxseg4ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2( @@ -327,7 +327,7 @@ void test_vluxseg4ei16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i32mf2(v0, v1, v2, 
v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1( @@ -344,7 +344,7 @@ void test_vluxseg4ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2( @@ -361,7 +361,7 @@ void test_vluxseg4ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1( @@ -378,7 +378,7 @@ void test_vluxseg4ei16_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2( @@ -395,7 +395,7 @@ void test_vluxseg4ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i64m2(v0, v1, v2, v3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8( @@ -412,7 +412,7 @@ void test_vluxseg4ei16_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4( @@ -429,7 +429,7 @@ void test_vluxseg4ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2( @@ -446,7 +446,7 @@ void test_vluxseg4ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1( @@ -463,7 +463,7 @@ void test_vluxseg4ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2( @@ -480,7 +480,7 @@ void test_vluxseg4ei16_v_u8m1(vuint8m1_t 
*v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4( @@ -497,7 +497,7 @@ void test_vluxseg4ei16_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2( @@ -514,7 +514,7 @@ void test_vluxseg4ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1( @@ -531,7 +531,7 @@ void test_vluxseg4ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2( @@ -548,7 +548,7 @@ void test_vluxseg4ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2( @@ -565,7 +565,7 @@ void test_vluxseg4ei16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1( @@ -582,7 +582,7 @@ void test_vluxseg4ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2( @@ -599,7 +599,7 @@ void test_vluxseg4ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1( @@ -616,7 +616,7 @@ void test_vluxseg4ei16_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, 
vuint64m1_t *v3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2( @@ -633,7 +633,7 @@ void test_vluxseg4ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_m( @@ -650,7 +650,7 @@ void test_vluxseg4ei16_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_m( @@ -667,7 +667,7 @@ void test_vluxseg4ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_m( @@ -684,7 +684,7 @@ void test_vluxseg4ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, 
vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_m( @@ -701,7 +701,7 @@ void test_vluxseg4ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_m( @@ -718,7 +718,7 @@ void test_vluxseg4ei16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_m( @@ -735,7 +735,7 @@ void test_vluxseg4ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_m( @@ -752,7 +752,7 @@ void test_vluxseg4ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // 
void test_vluxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_m( @@ -769,7 +769,7 @@ void test_vluxseg4ei16_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_m( @@ -786,7 +786,7 @@ void test_vluxseg4ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_m( @@ -803,7 +803,7 @@ void test_vluxseg4ei16_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_m( @@ -820,7 +820,7 @@ void test_vluxseg4ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t 
*v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_m( @@ -837,7 +837,7 @@ void test_vluxseg4ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_m( @@ -854,7 +854,7 @@ void test_vluxseg4ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_m( @@ -871,7 +871,7 @@ void test_vluxseg4ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_m( @@ -888,7 +888,7 @@ void test_vluxseg4ei16_v_i8m2_m(vint8m2_t *v0, 
vint8m2_t *v1, vint8m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_m( @@ -905,7 +905,7 @@ void test_vluxseg4ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_m( @@ -922,7 +922,7 @@ void test_vluxseg4ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_m( @@ -939,7 +939,7 @@ void test_vluxseg4ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_m( @@ -956,7 +956,7 @@ 
void test_vluxseg4ei16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_m( @@ -973,7 +973,7 @@ void test_vluxseg4ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_m( @@ -990,7 +990,7 @@ void test_vluxseg4ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_m( @@ -1007,7 +1007,7 @@ void test_vluxseg4ei16_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei16_v_i64m2_m( @@ -1024,7 +1024,7 @@ void test_vluxseg4ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_m( @@ -1041,7 +1041,7 @@ void test_vluxseg4ei16_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_m( @@ -1058,7 +1058,7 @@ void test_vluxseg4ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_m( @@ -1075,7 +1075,7 @@ void test_vluxseg4ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf2_m(v0, v1, v2, 
v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_m( @@ -1092,7 +1092,7 @@ void test_vluxseg4ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_m( @@ -1109,7 +1109,7 @@ void test_vluxseg4ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_m( @@ -1126,7 +1126,7 @@ void test_vluxseg4ei16_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_m( @@ -1143,7 +1143,7 @@ void test_vluxseg4ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); 
+ return __riscv_vluxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_m( @@ -1160,7 +1160,7 @@ void test_vluxseg4ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_m( @@ -1177,7 +1177,7 @@ void test_vluxseg4ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_m( @@ -1194,7 +1194,7 @@ void test_vluxseg4ei16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_m( @@ -1211,7 +1211,7 @@ void test_vluxseg4ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - 
return vluxseg4ei16_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_m( @@ -1228,7 +1228,7 @@ void test_vluxseg4ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_m( @@ -1245,7 +1245,7 @@ void test_vluxseg4ei16_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_m( @@ -1262,6 +1262,6 @@ void test_vluxseg4ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c index e931b93d81c7..62394abdd5b2 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei32.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2( @@ -38,7 +38,7 @@ void test_vluxseg4ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1( @@ -55,7 +55,7 @@ void test_vluxseg4ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2( @@ -72,7 +72,7 @@ void test_vluxseg4ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16m2(v0, v1, v2, v3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2( @@ -89,7 +89,7 @@ void test_vluxseg4ei32_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1( @@ -106,7 +106,7 @@ void test_vluxseg4ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2( @@ -123,7 +123,7 @@ void test_vluxseg4ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1( @@ -140,7 +140,7 @@ void test_vluxseg4ei32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2( @@ -157,7 +157,7 @@ void 
test_vluxseg4ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8( @@ -174,7 +174,7 @@ void test_vluxseg4ei32_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4( @@ -191,7 +191,7 @@ void test_vluxseg4ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2( @@ -208,7 +208,7 @@ void test_vluxseg4ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1( @@ -225,7 +225,7 @@ void test_vluxseg4ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2( @@ -242,7 +242,7 @@ void test_vluxseg4ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4( @@ -259,7 +259,7 @@ void test_vluxseg4ei32_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2( @@ -276,7 +276,7 @@ void test_vluxseg4ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1( @@ -293,7 +293,7 @@ void test_vluxseg4ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, 
vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2( @@ -310,7 +310,7 @@ void test_vluxseg4ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2( @@ -327,7 +327,7 @@ void test_vluxseg4ei32_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1( @@ -344,7 +344,7 @@ void test_vluxseg4ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2( @@ -361,7 +361,7 @@ void test_vluxseg4ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m2(v0, v1, v2, v3, base, bindex, vl); + return 
__riscv_vluxseg4ei32_v_i32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1( @@ -378,7 +378,7 @@ void test_vluxseg4ei32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2( @@ -395,7 +395,7 @@ void test_vluxseg4ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8( @@ -412,7 +412,7 @@ void test_vluxseg4ei32_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4( @@ -429,7 +429,7 @@ void test_vluxseg4ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei32_v_u8mf2( @@ -446,7 +446,7 @@ void test_vluxseg4ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1( @@ -463,7 +463,7 @@ void test_vluxseg4ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2( @@ -480,7 +480,7 @@ void test_vluxseg4ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4( @@ -497,7 +497,7 @@ void test_vluxseg4ei32_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2( @@ -514,7 +514,7 @@ void test_vluxseg4ei32_v_u16mf4(vuint16mf4_t *v0, 
vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1( @@ -531,7 +531,7 @@ void test_vluxseg4ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2( @@ -548,7 +548,7 @@ void test_vluxseg4ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2( @@ -565,7 +565,7 @@ void test_vluxseg4ei32_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1( @@ -582,7 +582,7 @@ void test_vluxseg4ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2( @@ -599,7 +599,7 @@ void test_vluxseg4ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1( @@ -616,7 +616,7 @@ void test_vluxseg4ei32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2( @@ -633,7 +633,7 @@ void test_vluxseg4ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_m( @@ -650,7 +650,7 @@ void test_vluxseg4ei32_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, 
vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_m( @@ -667,7 +667,7 @@ void test_vluxseg4ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_m( @@ -684,7 +684,7 @@ void test_vluxseg4ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_m( @@ -701,7 +701,7 @@ void test_vluxseg4ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_m( @@ -718,7 +718,7 @@ void test_vluxseg4ei32_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_m( @@ -735,7 +735,7 @@ void test_vluxseg4ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_m( @@ -752,7 +752,7 @@ void test_vluxseg4ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_m( @@ -769,7 +769,7 @@ void test_vluxseg4ei32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_m( @@ -786,7 +786,7 @@ void test_vluxseg4ei32_v_f64m1_m(vfloat64m1_t *v0, 
vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_m( @@ -803,7 +803,7 @@ void test_vluxseg4ei32_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_m( @@ -820,7 +820,7 @@ void test_vluxseg4ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_m( @@ -837,7 +837,7 @@ void test_vluxseg4ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_m( @@ -854,7 +854,7 @@ void 
test_vluxseg4ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_m( @@ -871,7 +871,7 @@ void test_vluxseg4ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_m( @@ -888,7 +888,7 @@ void test_vluxseg4ei32_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_m( @@ -905,7 +905,7 @@ void test_vluxseg4ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_m( 
@@ -922,7 +922,7 @@ void test_vluxseg4ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_m( @@ -939,7 +939,7 @@ void test_vluxseg4ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_m( @@ -956,7 +956,7 @@ void test_vluxseg4ei32_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_m( @@ -973,7 +973,7 @@ void test_vluxseg4ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_m( @@ -990,7 +990,7 @@ void test_vluxseg4ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_m( @@ -1007,7 +1007,7 @@ void test_vluxseg4ei32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_m( @@ -1024,7 +1024,7 @@ void test_vluxseg4ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_m( @@ -1041,7 +1041,7 @@ void test_vluxseg4ei32_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf8_m(v0, 
v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_m( @@ -1058,7 +1058,7 @@ void test_vluxseg4ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_m( @@ -1075,7 +1075,7 @@ void test_vluxseg4ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_m( @@ -1092,7 +1092,7 @@ void test_vluxseg4ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_m( @@ -1109,7 +1109,7 @@ void test_vluxseg4ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return 
__riscv_vluxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_m( @@ -1126,7 +1126,7 @@ void test_vluxseg4ei32_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_m( @@ -1143,7 +1143,7 @@ void test_vluxseg4ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_m( @@ -1160,7 +1160,7 @@ void test_vluxseg4ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_m( @@ -1177,7 +1177,7 @@ void test_vluxseg4ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return 
vluxseg4ei32_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_m( @@ -1194,7 +1194,7 @@ void test_vluxseg4ei32_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_m( @@ -1211,7 +1211,7 @@ void test_vluxseg4ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_m( @@ -1228,7 +1228,7 @@ void test_vluxseg4ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_m( @@ -1245,7 +1245,7 @@ void test_vluxseg4ei32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t 
mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_m( @@ -1262,6 +1262,6 @@ void test_vluxseg4ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c index 5f567e19b6ed..84dd477f420d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei64.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2( @@ -38,7 +38,7 @@ void test_vluxseg4ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16mf2(v0, v1, v2, v3, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1( @@ -55,7 +55,7 @@ void test_vluxseg4ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2( @@ -72,7 +72,7 @@ void test_vluxseg4ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2( @@ -89,7 +89,7 @@ void test_vluxseg4ei64_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1( @@ -106,7 +106,7 @@ void test_vluxseg4ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2( @@ -123,7 
+123,7 @@ void test_vluxseg4ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1( @@ -140,7 +140,7 @@ void test_vluxseg4ei64_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2( @@ -157,7 +157,7 @@ void test_vluxseg4ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8( @@ -174,7 +174,7 @@ void test_vluxseg4ei64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4( @@ -191,7 +191,7 @@ void test_vluxseg4ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // 
CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2( @@ -208,7 +208,7 @@ void test_vluxseg4ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1( @@ -225,7 +225,7 @@ void test_vluxseg4ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4( @@ -242,7 +242,7 @@ void test_vluxseg4ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2( @@ -259,7 +259,7 @@ void test_vluxseg4ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, 
vint16mf2_t *v3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1( @@ -276,7 +276,7 @@ void test_vluxseg4ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2( @@ -293,7 +293,7 @@ void test_vluxseg4ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2( @@ -310,7 +310,7 @@ void test_vluxseg4ei64_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1( @@ -327,7 +327,7 @@ void test_vluxseg4ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m1(v0, 
v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2( @@ -344,7 +344,7 @@ void test_vluxseg4ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1( @@ -361,7 +361,7 @@ void test_vluxseg4ei64_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2( @@ -378,7 +378,7 @@ void test_vluxseg4ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8( @@ -395,7 +395,7 @@ void test_vluxseg4ei64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4( @@ -412,7 +412,7 @@ void test_vluxseg4ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2( @@ -429,7 +429,7 @@ void test_vluxseg4ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1( @@ -446,7 +446,7 @@ void test_vluxseg4ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4( @@ -463,7 +463,7 @@ void test_vluxseg4ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2( @@ -480,7 +480,7 @@ void 
test_vluxseg4ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1( @@ -497,7 +497,7 @@ void test_vluxseg4ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2( @@ -514,7 +514,7 @@ void test_vluxseg4ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2( @@ -531,7 +531,7 @@ void test_vluxseg4ei64_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1( @@ -548,7 +548,7 @@ void test_vluxseg4ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2( @@ -565,7 +565,7 @@ void test_vluxseg4ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1( @@ -582,7 +582,7 @@ void test_vluxseg4ei64_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2( @@ -599,7 +599,7 @@ void test_vluxseg4ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_m( @@ -616,7 +616,7 @@ void test_vluxseg4ei64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, 
vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_m( @@ -633,7 +633,7 @@ void test_vluxseg4ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_m( @@ -650,7 +650,7 @@ void test_vluxseg4ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_m( @@ -667,7 +667,7 @@ void test_vluxseg4ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_m( @@ -684,7 +684,7 @@ void test_vluxseg4ei64_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // 
CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_m( @@ -701,7 +701,7 @@ void test_vluxseg4ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_m( @@ -718,7 +718,7 @@ void test_vluxseg4ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_m( @@ -735,7 +735,7 @@ void test_vluxseg4ei64_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_m( @@ -752,7 +752,7 @@ void 
test_vluxseg4ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_m( @@ -769,7 +769,7 @@ void test_vluxseg4ei64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_m( @@ -786,7 +786,7 @@ void test_vluxseg4ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_m( @@ -803,7 +803,7 @@ void test_vluxseg4ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei64_v_i8m1_m( @@ -820,7 +820,7 @@ void test_vluxseg4ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_m( @@ -837,7 +837,7 @@ void test_vluxseg4ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_m( @@ -854,7 +854,7 @@ void test_vluxseg4ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_m( @@ -871,7 +871,7 @@ void test_vluxseg4ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_m( @@ -888,7 +888,7 @@ void test_vluxseg4ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_m( @@ -905,7 +905,7 @@ void test_vluxseg4ei64_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_m( @@ -922,7 +922,7 @@ void test_vluxseg4ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_m( @@ -939,7 +939,7 @@ void test_vluxseg4ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return 
__riscv_vluxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_m( @@ -956,7 +956,7 @@ void test_vluxseg4ei64_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_m( @@ -973,7 +973,7 @@ void test_vluxseg4ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_m( @@ -990,7 +990,7 @@ void test_vluxseg4ei64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_m( @@ -1007,7 +1007,7 @@ void test_vluxseg4ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf4_m(v0, v1, v2, 
v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_m( @@ -1024,7 +1024,7 @@ void test_vluxseg4ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_m( @@ -1041,7 +1041,7 @@ void test_vluxseg4ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_m( @@ -1058,7 +1058,7 @@ void test_vluxseg4ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_m( @@ -1075,7 +1075,7 @@ void test_vluxseg4ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint64m2_t 
bindex, size_t vl) { - return vluxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_m( @@ -1092,7 +1092,7 @@ void test_vluxseg4ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_m( @@ -1109,7 +1109,7 @@ void test_vluxseg4ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_m( @@ -1126,7 +1126,7 @@ void test_vluxseg4ei64_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_m( @@ -1143,7 +1143,7 @@ void test_vluxseg4ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, 
vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_m( @@ -1160,7 +1160,7 @@ void test_vluxseg4ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_m( @@ -1177,7 +1177,7 @@ void test_vluxseg4ei64_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_m( @@ -1194,6 +1194,6 @@ void test_vluxseg4ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c index 
513d694c41de..f6fcbd985996 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg4ei8.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2( @@ -38,7 +38,7 @@ void test_vluxseg4ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1( @@ -55,7 +55,7 @@ void test_vluxseg4ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2( @@ -72,7 +72,7 @@ void test_vluxseg4ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16m2(v0, v1, v2, v3, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2( @@ -89,7 +89,7 @@ void test_vluxseg4ei8_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1( @@ -106,7 +106,7 @@ void test_vluxseg4ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2( @@ -123,7 +123,7 @@ void test_vluxseg4ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1( @@ -140,7 +140,7 @@ void test_vluxseg4ei8_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2( @@ -157,7 +157,7 @@ void 
test_vluxseg4ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8( @@ -174,7 +174,7 @@ void test_vluxseg4ei8_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4( @@ -191,7 +191,7 @@ void test_vluxseg4ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2( @@ -208,7 +208,7 @@ void test_vluxseg4ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1( @@ -225,7 +225,7 @@ void test_vluxseg4ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2( @@ -242,7 +242,7 @@ void test_vluxseg4ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4( @@ -259,7 +259,7 @@ void test_vluxseg4ei8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2( @@ -276,7 +276,7 @@ void test_vluxseg4ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1( @@ -293,7 +293,7 @@ void test_vluxseg4ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, vuint8mf2_t bindex, 
size_t vl) { - return vluxseg4ei8_v_i16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2( @@ -310,7 +310,7 @@ void test_vluxseg4ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2( @@ -327,7 +327,7 @@ void test_vluxseg4ei8_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1( @@ -344,7 +344,7 @@ void test_vluxseg4ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2( @@ -361,7 +361,7 @@ void test_vluxseg4ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32m2(v0, v1, 
v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1( @@ -378,7 +378,7 @@ void test_vluxseg4ei8_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2( @@ -395,7 +395,7 @@ void test_vluxseg4ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8( @@ -412,7 +412,7 @@ void test_vluxseg4ei8_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf8(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4( @@ -429,7 +429,7 @@ void test_vluxseg4ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2( @@ -446,7 +446,7 @@ void 
test_vluxseg4ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1( @@ -463,7 +463,7 @@ void test_vluxseg4ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2( @@ -480,7 +480,7 @@ void test_vluxseg4ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4( @@ -497,7 +497,7 @@ void test_vluxseg4ei8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16mf4(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2( @@ -514,7 +514,7 @@ void test_vluxseg4ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1( @@ -531,7 +531,7 @@ void test_vluxseg4ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2( @@ -548,7 +548,7 @@ void test_vluxseg4ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2( @@ -565,7 +565,7 @@ void test_vluxseg4ei8_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32mf2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1( @@ -582,7 +582,7 @@ void test_vluxseg4ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const 
uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2( @@ -599,7 +599,7 @@ void test_vluxseg4ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1( @@ -616,7 +616,7 @@ void test_vluxseg4ei8_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m1(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u64m1(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2( @@ -633,7 +633,7 @@ void test_vluxseg4ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m2(v0, v1, v2, v3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u64m2(v0, v1, v2, v3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_m( @@ -650,7 +650,7 @@ void test_vluxseg4ei8_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return 
vluxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_m( @@ -667,7 +667,7 @@ void test_vluxseg4ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_m( @@ -684,7 +684,7 @@ void test_vluxseg4ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_m( @@ -701,7 +701,7 @@ void test_vluxseg4ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_m( @@ -718,7 +718,7 @@ void test_vluxseg4ei8_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, 
const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_m( @@ -735,7 +735,7 @@ void test_vluxseg4ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_m( @@ -752,7 +752,7 @@ void test_vluxseg4ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_m( @@ -769,7 +769,7 @@ void test_vluxseg4ei8_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_m( @@ -786,7 +786,7 @@ void test_vluxseg4ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t 
*v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_m( @@ -803,7 +803,7 @@ void test_vluxseg4ei8_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_m( @@ -820,7 +820,7 @@ void test_vluxseg4ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_m( @@ -837,7 +837,7 @@ void test_vluxseg4ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_m( @@ -854,7 +854,7 @@ void test_vluxseg4ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, 
vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_m( @@ -871,7 +871,7 @@ void test_vluxseg4ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_m( @@ -888,7 +888,7 @@ void test_vluxseg4ei8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_m( @@ -905,7 +905,7 @@ void test_vluxseg4ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_m( @@ -922,7 +922,7 @@ void test_vluxseg4ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16m1_m(vint16m1_t *v0, 
vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_m( @@ -939,7 +939,7 @@ void test_vluxseg4ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_m( @@ -956,7 +956,7 @@ void test_vluxseg4ei8_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_m( @@ -973,7 +973,7 @@ void test_vluxseg4ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_m( @@ -990,7 +990,7 @@ void test_vluxseg4ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_m( @@ -1007,7 +1007,7 @@ void test_vluxseg4ei8_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_m( @@ -1024,7 +1024,7 @@ void test_vluxseg4ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_m( @@ -1041,7 +1041,7 @@ void test_vluxseg4ei8_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_m( @@ -1058,7 +1058,7 @@ void test_vluxseg4ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // 
CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_m( @@ -1075,7 +1075,7 @@ void test_vluxseg4ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_m( @@ -1092,7 +1092,7 @@ void test_vluxseg4ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_m( @@ -1109,7 +1109,7 @@ void test_vluxseg4ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_m( @@ -1126,7 +1126,7 @@ void test_vluxseg4ei8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, 
vuint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_m( @@ -1143,7 +1143,7 @@ void test_vluxseg4ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_m( @@ -1160,7 +1160,7 @@ void test_vluxseg4ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_m( @@ -1177,7 +1177,7 @@ void test_vluxseg4ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_m( @@ -1194,7 +1194,7 @@ void 
test_vluxseg4ei8_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_m( @@ -1211,7 +1211,7 @@ void test_vluxseg4ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_m( @@ -1228,7 +1228,7 @@ void test_vluxseg4ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_m( @@ -1245,7 +1245,7 @@ void test_vluxseg4ei8_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei8_v_u64m2_m( @@ -1262,6 +1262,6 @@ void test_vluxseg4ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c index e3442bcdeb9b..006f8fe13ec5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei16.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2( @@ -42,7 +42,7 @@ void test_vluxseg5ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1( @@ -61,7 +61,7 @@ void test_vluxseg5ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // 
CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2( @@ -80,7 +80,7 @@ void test_vluxseg5ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1( @@ -99,7 +99,7 @@ void test_vluxseg5ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1( @@ -118,7 +118,7 @@ void test_vluxseg5ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8( @@ -137,7 +137,7 @@ void test_vluxseg5ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t 
*v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4( @@ -156,7 +156,7 @@ void test_vluxseg5ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2( @@ -175,7 +175,7 @@ void test_vluxseg5ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1( @@ -194,7 +194,7 @@ void test_vluxseg5ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4( @@ -213,7 +213,7 @@ void test_vluxseg5ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // 
CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2( @@ -232,7 +232,7 @@ void test_vluxseg5ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1( @@ -251,7 +251,7 @@ void test_vluxseg5ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2( @@ -270,7 +270,7 @@ void test_vluxseg5ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1( @@ -289,7 +289,7 @@ void test_vluxseg5ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, 
vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1( @@ -308,7 +308,7 @@ void test_vluxseg5ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8( @@ -327,7 +327,7 @@ void test_vluxseg5ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4( @@ -346,7 +346,7 @@ void test_vluxseg5ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2( @@ -365,7 +365,7 @@ void test_vluxseg5ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, 
vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1( @@ -384,7 +384,7 @@ void test_vluxseg5ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4( @@ -403,7 +403,7 @@ void test_vluxseg5ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2( @@ -422,7 +422,7 @@ void test_vluxseg5ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1( @@ -441,7 +441,7 @@ void test_vluxseg5ei16_v_u16mf2(vuint16mf2_t *v0, 
vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2( @@ -460,7 +460,7 @@ void test_vluxseg5ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1( @@ -479,7 +479,7 @@ void test_vluxseg5ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1( @@ -498,7 +498,7 @@ void test_vluxseg5ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_m( @@ -517,7 +517,7 @@ void 
test_vluxseg5ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_m( @@ -536,7 +536,7 @@ void test_vluxseg5ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_m( @@ -555,7 +555,7 @@ void test_vluxseg5ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_m( @@ -574,7 +574,7 @@ void test_vluxseg5ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f32mf2_m(v0, v1, v2, 
v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_m( @@ -593,7 +593,7 @@ void test_vluxseg5ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_m( @@ -612,7 +612,7 @@ void test_vluxseg5ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_m( @@ -631,7 +631,7 @@ void test_vluxseg5ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_m( @@ -650,7 +650,7 @@ void test_vluxseg5ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, 
vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_m( @@ -669,7 +669,7 @@ void test_vluxseg5ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_m( @@ -688,7 +688,7 @@ void test_vluxseg5ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_m( @@ -707,7 +707,7 @@ void test_vluxseg5ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_m( @@ -726,7 +726,7 @@ void test_vluxseg5ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, 
vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_m( @@ -745,7 +745,7 @@ void test_vluxseg5ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_m( @@ -764,7 +764,7 @@ void test_vluxseg5ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_m( @@ -783,7 +783,7 @@ void test_vluxseg5ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_m( @@ -802,7 +802,7 @@ void test_vluxseg5ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_m( @@ -821,7 +821,7 @@ void test_vluxseg5ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_m( @@ -840,7 +840,7 @@ void test_vluxseg5ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_m( @@ -859,7 +859,7 @@ void test_vluxseg5ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return 
vluxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_m( @@ -878,7 +878,7 @@ void test_vluxseg5ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_m( @@ -897,7 +897,7 @@ void test_vluxseg5ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_m( @@ -916,7 +916,7 @@ void test_vluxseg5ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_m( @@ -935,7 +935,7 @@ void test_vluxseg5ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_m( @@ -954,7 +954,7 @@ void test_vluxseg5ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_m( @@ -973,7 +973,7 @@ void test_vluxseg5ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_m( @@ -992,6 +992,6 @@ void test_vluxseg5ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c index f143f2d6f158..d5fcd65a5968 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei32.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2( @@ -42,7 +42,7 @@ void test_vluxseg5ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1( @@ -61,7 +61,7 @@ void test_vluxseg5ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2( @@ -80,7 +80,7 @@ void test_vluxseg5ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1( @@ -99,7 +99,7 @@ void test_vluxseg5ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1( @@ -118,7 +118,7 @@ void test_vluxseg5ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8( @@ -137,7 +137,7 @@ void test_vluxseg5ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4( @@ -156,7 +156,7 @@ void test_vluxseg5ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, 
vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2( @@ -175,7 +175,7 @@ void test_vluxseg5ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1( @@ -194,7 +194,7 @@ void test_vluxseg5ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4( @@ -213,7 +213,7 @@ void test_vluxseg5ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2( @@ -232,7 +232,7 @@ void test_vluxseg5ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // 
CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1( @@ -251,7 +251,7 @@ void test_vluxseg5ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2( @@ -270,7 +270,7 @@ void test_vluxseg5ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1( @@ -289,7 +289,7 @@ void test_vluxseg5ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1( @@ -308,7 +308,7 @@ void test_vluxseg5ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // 
CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8( @@ -327,7 +327,7 @@ void test_vluxseg5ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4( @@ -346,7 +346,7 @@ void test_vluxseg5ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2( @@ -365,7 +365,7 @@ void test_vluxseg5ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1( @@ -384,7 +384,7 @@ void test_vluxseg5ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // 
CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4( @@ -403,7 +403,7 @@ void test_vluxseg5ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2( @@ -422,7 +422,7 @@ void test_vluxseg5ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1( @@ -441,7 +441,7 @@ void test_vluxseg5ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2( @@ -460,7 +460,7 @@ void test_vluxseg5ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, 
vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1( @@ -479,7 +479,7 @@ void test_vluxseg5ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1( @@ -498,7 +498,7 @@ void test_vluxseg5ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_m( @@ -517,7 +517,7 @@ void test_vluxseg5ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_m( @@ -536,7 +536,7 
@@ void test_vluxseg5ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_m( @@ -555,7 +555,7 @@ void test_vluxseg5ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_m( @@ -574,7 +574,7 @@ void test_vluxseg5ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_m( @@ -593,7 +593,7 @@ void test_vluxseg5ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f32m1_m(v0, v1, v2, v3, v4, 
mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_m( @@ -612,7 +612,7 @@ void test_vluxseg5ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_m( @@ -631,7 +631,7 @@ void test_vluxseg5ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_m( @@ -650,7 +650,7 @@ void test_vluxseg5ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_m( @@ -669,7 +669,7 @@ void test_vluxseg5ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, 
vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_m( @@ -688,7 +688,7 @@ void test_vluxseg5ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_m( @@ -707,7 +707,7 @@ void test_vluxseg5ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_m( @@ -726,7 +726,7 @@ void test_vluxseg5ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_m( @@ -745,7 +745,7 @@ void test_vluxseg5ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t 
// CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_m( @@ -764,7 +764,7 @@ void test_vluxseg5ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_m( @@ -783,7 +783,7 @@ void test_vluxseg5ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_m( @@ -802,7 +802,7 @@ void test_vluxseg5ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_m( @@ -821,7 +821,7 @@ void test_vluxseg5ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_m( @@ -840,7 +840,7 @@ void test_vluxseg5ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_m( @@ -859,7 +859,7 @@ void test_vluxseg5ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_m( @@ -878,7 +878,7 @@ void test_vluxseg5ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return 
vluxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_m( @@ -897,7 +897,7 @@ void test_vluxseg5ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_m( @@ -916,7 +916,7 @@ void test_vluxseg5ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_m( @@ -935,7 +935,7 @@ void test_vluxseg5ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_m( @@ -954,7 +954,7 @@ void test_vluxseg5ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_m( @@ -973,7 +973,7 @@ void test_vluxseg5ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_m( @@ -992,6 +992,6 @@ void test_vluxseg5ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c index f881bedd115c..05f024e065f7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei64.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, 
vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2( @@ -42,7 +42,7 @@ void test_vluxseg5ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1( @@ -61,7 +61,7 @@ void test_vluxseg5ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2( @@ -80,7 +80,7 @@ void test_vluxseg5ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1( @@ -99,7 +99,7 @@ void test_vluxseg5ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1( @@ -118,7 +118,7 @@ void test_vluxseg5ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8( @@ -137,7 +137,7 @@ void test_vluxseg5ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4( @@ -156,7 +156,7 @@ void test_vluxseg5ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2( @@ -175,7 +175,7 @@ void test_vluxseg5ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1( @@ -194,7 +194,7 @@ void test_vluxseg5ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4( @@ -213,7 +213,7 @@ void test_vluxseg5ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2( @@ -232,7 +232,7 @@ void test_vluxseg5ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1( @@ -251,7 +251,7 @@ void test_vluxseg5ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2( @@ -270,7 +270,7 @@ void test_vluxseg5ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1( @@ -289,7 +289,7 @@ void test_vluxseg5ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1( @@ -308,7 +308,7 @@ void test_vluxseg5ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8( @@ -327,7 +327,7 @@ void test_vluxseg5ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4( @@ -346,7 +346,7 @@ void test_vluxseg5ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2( @@ -365,7 +365,7 @@ void test_vluxseg5ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1( @@ -384,7 +384,7 @@ void test_vluxseg5ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4( @@ -403,7 +403,7 @@ void test_vluxseg5ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2( @@ -422,7 +422,7 @@ void test_vluxseg5ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1( @@ -441,7 +441,7 @@ void test_vluxseg5ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2( @@ -460,7 +460,7 @@ void test_vluxseg5ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1( @@ -479,7 +479,7 @@ void test_vluxseg5ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1( @@ -498,7 +498,7 @@ void test_vluxseg5ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_m( @@ -517,7 +517,7 @@ void test_vluxseg5ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_m( @@ -536,7 +536,7 @@ void test_vluxseg5ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg5ei64_v_f16m1_m( @@ -555,7 +555,7 @@ void test_vluxseg5ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_m( @@ -574,7 +574,7 @@ void test_vluxseg5ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_m( @@ -593,7 +593,7 @@ void test_vluxseg5ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_m( @@ -612,7 +612,7 @@ void test_vluxseg5ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return 
vluxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_m( @@ -631,7 +631,7 @@ void test_vluxseg5ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_m( @@ -650,7 +650,7 @@ void test_vluxseg5ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_m( @@ -669,7 +669,7 @@ void test_vluxseg5ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_m( @@ -688,7 +688,7 @@ void test_vluxseg5ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t 
*v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_m( @@ -707,7 +707,7 @@ void test_vluxseg5ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_m( @@ -726,7 +726,7 @@ void test_vluxseg5ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_m( @@ -745,7 +745,7 @@ void test_vluxseg5ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_m( @@ -764,7 +764,7 @@ void test_vluxseg5ei64_v_i16m1_m(vint16m1_t *v0, 
vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_m( @@ -783,7 +783,7 @@ void test_vluxseg5ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_m( @@ -802,7 +802,7 @@ void test_vluxseg5ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_m( @@ -821,7 +821,7 @@ void test_vluxseg5ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_m( @@ -840,7 +840,7 @@ void test_vluxseg5ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_m( @@ -859,7 +859,7 @@ void test_vluxseg5ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_m( @@ -878,7 +878,7 @@ void test_vluxseg5ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_m( @@ -897,7 +897,7 @@ void test_vluxseg5ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - 
return vluxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_m( @@ -916,7 +916,7 @@ void test_vluxseg5ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_m( @@ -935,7 +935,7 @@ void test_vluxseg5ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_m( @@ -954,7 +954,7 @@ void test_vluxseg5ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_m( @@ -973,7 +973,7 @@ void test_vluxseg5ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_m( @@ -992,6 +992,6 @@ void test_vluxseg5ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c index 557c3cb32135..b2b5166cd628 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg5ei8.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2( @@ -42,7 +42,7 @@ void test_vluxseg5ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, 
vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1( @@ -61,7 +61,7 @@ void test_vluxseg5ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2( @@ -80,7 +80,7 @@ void test_vluxseg5ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1( @@ -99,7 +99,7 @@ void test_vluxseg5ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1( @@ -118,7 +118,7 @@ void test_vluxseg5ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, 
vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8( @@ -137,7 +137,7 @@ void test_vluxseg5ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4( @@ -156,7 +156,7 @@ void test_vluxseg5ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2( @@ -175,7 +175,7 @@ void test_vluxseg5ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1( @@ -194,7 +194,7 @@ void test_vluxseg5ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const 
int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4( @@ -213,7 +213,7 @@ void test_vluxseg5ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2( @@ -232,7 +232,7 @@ void test_vluxseg5ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1( @@ -251,7 +251,7 @@ void test_vluxseg5ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2( @@ -270,7 +270,7 @@ void test_vluxseg5ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, 
vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1( @@ -289,7 +289,7 @@ void test_vluxseg5ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1( @@ -308,7 +308,7 @@ void test_vluxseg5ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8( @@ -327,7 +327,7 @@ void test_vluxseg5ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf8(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4( @@ -346,7 +346,7 @@ void test_vluxseg5ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, vuint8mf4_t bindex, 
size_t vl) { - return vluxseg5ei8_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2( @@ -365,7 +365,7 @@ void test_vluxseg5ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1( @@ -384,7 +384,7 @@ void test_vluxseg5ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4( @@ -403,7 +403,7 @@ void test_vluxseg5ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2( @@ -422,7 +422,7 @@ void test_vluxseg5ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, vuint8mf4_t bindex, size_t 
vl) { - return vluxseg5ei8_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1( @@ -441,7 +441,7 @@ void test_vluxseg5ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2( @@ -460,7 +460,7 @@ void test_vluxseg5ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u32mf2(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1( @@ -479,7 +479,7 @@ void test_vluxseg5ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u32m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1( @@ -498,7 +498,7 @@ void test_vluxseg5ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, vuint8mf8_t bindex, size_t 
vl) { - return vluxseg5ei8_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u64m1(v0, v1, v2, v3, v4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_m( @@ -517,7 +517,7 @@ void test_vluxseg5ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_m( @@ -536,7 +536,7 @@ void test_vluxseg5ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_m( @@ -555,7 +555,7 @@ void test_vluxseg5ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_m( @@ -574,7 +574,7 @@ void test_vluxseg5ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_m( @@ -593,7 +593,7 @@ void test_vluxseg5ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_m( @@ -612,7 +612,7 @@ void test_vluxseg5ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_m( @@ -631,7 +631,7 @@ void test_vluxseg5ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg5ei8_v_i8mf4_m( @@ -650,7 +650,7 @@ void test_vluxseg5ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_m( @@ -669,7 +669,7 @@ void test_vluxseg5ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_m( @@ -688,7 +688,7 @@ void test_vluxseg5ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_m( @@ -707,7 +707,7 @@ void test_vluxseg5ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + 
return __riscv_vluxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_m( @@ -726,7 +726,7 @@ void test_vluxseg5ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_m( @@ -745,7 +745,7 @@ void test_vluxseg5ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_m( @@ -764,7 +764,7 @@ void test_vluxseg5ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_m( @@ -783,7 +783,7 @@ void test_vluxseg5ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const 
int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_m( @@ -802,7 +802,7 @@ void test_vluxseg5ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_m( @@ -821,7 +821,7 @@ void test_vluxseg5ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_m( @@ -840,7 +840,7 @@ void test_vluxseg5ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_m( @@ -859,7 +859,7 @@ void test_vluxseg5ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_m( @@ -878,7 +878,7 @@ void test_vluxseg5ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_m( @@ -897,7 +897,7 @@ void test_vluxseg5ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_m( @@ -916,7 +916,7 @@ void test_vluxseg5ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_m( 
@@ -935,7 +935,7 @@ void test_vluxseg5ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_m( @@ -954,7 +954,7 @@ void test_vluxseg5ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_m( @@ -973,7 +973,7 @@ void test_vluxseg5ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_m( @@ -992,6 +992,6 @@ void test_vluxseg5ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, 
vl); + return __riscv_vluxseg5ei8_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c index f27418d37b84..c2d2dd6b2c81 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei16.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2( @@ -46,7 +46,7 @@ void test_vluxseg6ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1( @@ -67,7 +67,7 @@ void test_vluxseg6ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16m1(v0, v1, v2, v3, v4, v5, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2( @@ -88,7 +88,7 @@ void test_vluxseg6ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1( @@ -109,7 +109,7 @@ void test_vluxseg6ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1( @@ -130,7 +130,7 @@ void test_vluxseg6ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8( @@ -151,7 +151,7 @@ void test_vluxseg6ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vluxseg6ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4( @@ -172,7 +172,7 @@ void test_vluxseg6ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2( @@ -193,7 +193,7 @@ void test_vluxseg6ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1( @@ -214,7 +214,7 @@ void test_vluxseg6ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4( @@ -235,7 +235,7 @@ void test_vluxseg6ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, 
vint16mf4_t *v5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2( @@ -256,7 +256,7 @@ void test_vluxseg6ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1( @@ -277,7 +277,7 @@ void test_vluxseg6ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2( @@ -298,7 +298,7 @@ void test_vluxseg6ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1( @@ -319,7 +319,7 @@ void test_vluxseg6ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1( @@ -340,7 +340,7 @@ void test_vluxseg6ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8( @@ -361,7 +361,7 @@ void test_vluxseg6ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4( @@ -382,7 +382,7 @@ void test_vluxseg6ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2( @@ -403,7 +403,7 @@ void 
test_vluxseg6ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1( @@ -424,7 +424,7 @@ void test_vluxseg6ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4( @@ -445,7 +445,7 @@ void test_vluxseg6ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2( @@ -466,7 +466,7 @@ void test_vluxseg6ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16mf2(v0, 
v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1( @@ -487,7 +487,7 @@ void test_vluxseg6ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2( @@ -508,7 +508,7 @@ void test_vluxseg6ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1( @@ -529,7 +529,7 @@ void test_vluxseg6ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1( @@ -550,7 +550,7 @@ void test_vluxseg6ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) 
{ - return vluxseg6ei16_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_m( @@ -571,7 +571,7 @@ void test_vluxseg6ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_m( @@ -592,7 +592,7 @@ void test_vluxseg6ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_m( @@ -613,7 +613,7 @@ void test_vluxseg6ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_m( @@ -634,7 +634,7 @@ void 
test_vluxseg6ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_m( @@ -655,7 +655,7 @@ void test_vluxseg6ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_m( @@ -676,7 +676,7 @@ void test_vluxseg6ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_m( @@ -697,7 +697,7 @@ void test_vluxseg6ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint16mf4_t 
bindex, size_t vl) { - return vluxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_m( @@ -718,7 +718,7 @@ void test_vluxseg6ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_m( @@ -739,7 +739,7 @@ void test_vluxseg6ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_m( @@ -760,7 +760,7 @@ void test_vluxseg6ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m( @@ -781,7 +781,7 @@ void test_vluxseg6ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin 
// CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_m( @@ -802,7 +802,7 @@ void test_vluxseg6ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_m( @@ -823,7 +823,7 @@ void test_vluxseg6ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_m( @@ -844,7 +844,7 @@ void test_vluxseg6ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, 
bindex, vl); + return __riscv_vluxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_m( @@ -865,7 +865,7 @@ void test_vluxseg6ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_m( @@ -886,7 +886,7 @@ void test_vluxseg6ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_m( @@ -907,7 +907,7 @@ void test_vluxseg6ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_m( @@ -928,7 +928,7 @@ void test_vluxseg6ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vluxseg6ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_m( @@ -970,7 +970,7 @@ void test_vluxseg6ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_m( @@ -991,7 +991,7 @@ void test_vluxseg6ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return 
__riscv_vluxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_m( @@ -1012,7 +1012,7 @@ void test_vluxseg6ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_m( @@ -1033,7 +1033,7 @@ void test_vluxseg6ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void test_vluxseg6ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_m( @@ -1075,7 +1075,7 @@ void test_vluxseg6ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_m( @@ -1096,6 +1096,6 @@ void test_vluxseg6ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c index 331a7ba4a565..990da7dac0fc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei32.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2( @@ -46,7 +46,7 @@ void test_vluxseg6ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1( @@ -67,7 +67,7 @@ void test_vluxseg6ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2( @@ -88,7 +88,7 @@ void test_vluxseg6ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1( @@ -109,7 +109,7 @@ void test_vluxseg6ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1( 
@@ -130,7 +130,7 @@ void test_vluxseg6ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8( @@ -151,7 +151,7 @@ void test_vluxseg6ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4( @@ -172,7 +172,7 @@ void test_vluxseg6ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2( @@ -193,7 +193,7 @@ void test_vluxseg6ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf2(v0, v1, 
v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1( @@ -214,7 +214,7 @@ void test_vluxseg6ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4( @@ -235,7 +235,7 @@ void test_vluxseg6ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2( @@ -256,7 +256,7 @@ void test_vluxseg6ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1( @@ -277,7 +277,7 @@ void test_vluxseg6ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return 
vluxseg6ei32_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2( @@ -298,7 +298,7 @@ void test_vluxseg6ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1( @@ -319,7 +319,7 @@ void test_vluxseg6ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1( @@ -340,7 +340,7 @@ void test_vluxseg6ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8( @@ -361,7 +361,7 @@ void test_vluxseg6ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, 
vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4( @@ -382,7 +382,7 @@ void test_vluxseg6ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2( @@ -403,7 +403,7 @@ void test_vluxseg6ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1( @@ -424,7 +424,7 @@ void test_vluxseg6ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4( @@ -445,7 +445,7 @@ void test_vluxseg6ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2( @@ -466,7 +466,7 @@ void test_vluxseg6ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1( @@ -487,7 +487,7 @@ void test_vluxseg6ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2( @@ -508,7 +508,7 @@ void test_vluxseg6ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1( @@ 
-529,7 +529,7 @@ void test_vluxseg6ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1( @@ -550,7 +550,7 @@ void test_vluxseg6ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_m( @@ -571,7 +571,7 @@ void test_vluxseg6ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_m( @@ -592,7 +592,7 @@ void test_vluxseg6ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return 
vluxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_m( @@ -613,7 +613,7 @@ void test_vluxseg6ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_m( @@ -634,7 +634,7 @@ void test_vluxseg6ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_m( @@ -655,7 +655,7 @@ void test_vluxseg6ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_m( @@ -676,7 +676,7 @@ void test_vluxseg6ei32_v_f32m1_m(vfloat32m1_t *v0, 
vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_m( @@ -697,7 +697,7 @@ void test_vluxseg6ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_m( @@ -718,7 +718,7 @@ void test_vluxseg6ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_m( @@ -739,7 +739,7 @@ void test_vluxseg6ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, 
base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_m( @@ -760,7 +760,7 @@ void test_vluxseg6ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_m( @@ -781,7 +781,7 @@ void test_vluxseg6ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_m( @@ -802,7 +802,7 @@ void test_vluxseg6ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_m( @@ -823,7 +823,7 @@ void test_vluxseg6ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_m( @@ -844,7 +844,7 @@ void test_vluxseg6ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_m( @@ -865,7 +865,7 @@ void test_vluxseg6ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_m( @@ -886,7 +886,7 @@ void test_vluxseg6ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i64m1_m(v0, v1, 
v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_m( @@ -907,7 +907,7 @@ void test_vluxseg6ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_m( @@ -928,7 +928,7 @@ void test_vluxseg6ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vluxseg6ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_m( @@ -970,7 +970,7 @@ void test_vluxseg6ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, 
vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_m( @@ -991,7 +991,7 @@ void test_vluxseg6ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_m( @@ -1012,7 +1012,7 @@ void test_vluxseg6ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_m( @@ -1033,7 +1033,7 @@ void test_vluxseg6ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void test_vluxseg6ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_m( @@ -1075,7 +1075,7 @@ void test_vluxseg6ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_m( @@ -1096,6 +1096,6 @@ void test_vluxseg6ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c index 204490a0a082..a212c596b31b 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei64.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2( @@ -46,7 +46,7 @@ void test_vluxseg6ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1( @@ -67,7 +67,7 @@ void test_vluxseg6ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2( @@ -88,7 +88,7 @@ void test_vluxseg6ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t 
*v5, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1( @@ -109,7 +109,7 @@ void test_vluxseg6ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1( @@ -130,7 +130,7 @@ void test_vluxseg6ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8( @@ -151,7 +151,7 @@ void test_vluxseg6ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4( @@ -172,7 +172,7 @@ void test_vluxseg6ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2( @@ -193,7 +193,7 @@ void test_vluxseg6ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1( @@ -214,7 +214,7 @@ void test_vluxseg6ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4( @@ -235,7 +235,7 @@ void test_vluxseg6ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2( @@ -256,7 +256,7 @@ void test_vluxseg6ei64_v_i16mf4(vint16mf4_t 
*v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1( @@ -277,7 +277,7 @@ void test_vluxseg6ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2( @@ -298,7 +298,7 @@ void test_vluxseg6ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1( @@ -319,7 +319,7 @@ void test_vluxseg6ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1( @@ -340,7 +340,7 @@ void test_vluxseg6ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8( @@ -361,7 +361,7 @@ void test_vluxseg6ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4( @@ -382,7 +382,7 @@ void test_vluxseg6ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2( @@ -403,7 +403,7 @@ void test_vluxseg6ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, base, 
bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1( @@ -424,7 +424,7 @@ void test_vluxseg6ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4( @@ -445,7 +445,7 @@ void test_vluxseg6ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2( @@ -466,7 +466,7 @@ void test_vluxseg6ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1( @@ -487,7 +487,7 @@ void test_vluxseg6ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, 
const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2( @@ -508,7 +508,7 @@ void test_vluxseg6ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1( @@ -529,7 +529,7 @@ void test_vluxseg6ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1( @@ -550,7 +550,7 @@ void test_vluxseg6ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_m( @@ -571,7 +571,7 @@ void test_vluxseg6ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_m( @@ -592,7 +592,7 @@ void test_vluxseg6ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_m( @@ -613,7 +613,7 @@ void test_vluxseg6ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_m( @@ -634,7 +634,7 @@ void test_vluxseg6ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, 
base, bindex, vl); + return __riscv_vluxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_m( @@ -655,7 +655,7 @@ void test_vluxseg6ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_m( @@ -676,7 +676,7 @@ void test_vluxseg6ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_m( @@ -697,7 +697,7 @@ void test_vluxseg6ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_m( @@ -718,7 +718,7 @@ void test_vluxseg6ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_m( @@ -739,7 +739,7 @@ void test_vluxseg6ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_m( @@ -760,7 +760,7 @@ void test_vluxseg6ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_m( @@ -781,7 +781,7 @@ void test_vluxseg6ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, 
mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_m( @@ -802,7 +802,7 @@ void test_vluxseg6ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_m( @@ -823,7 +823,7 @@ void test_vluxseg6ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_m( @@ -844,7 +844,7 @@ void test_vluxseg6ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_m( @@ -865,7 +865,7 @@ void test_vluxseg6ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, 
vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_m( @@ -886,7 +886,7 @@ void test_vluxseg6ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_m( @@ -907,7 +907,7 @@ void test_vluxseg6ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_m( @@ -928,7 +928,7 @@ void test_vluxseg6ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg6ei64_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vluxseg6ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_m( @@ -970,7 +970,7 @@ void test_vluxseg6ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_m( @@ -991,7 +991,7 @@ void test_vluxseg6ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_m( @@ -1012,7 +1012,7 @@ void test_vluxseg6ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t 
mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_m( @@ -1033,7 +1033,7 @@ void test_vluxseg6ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void test_vluxseg6ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_m( @@ -1075,7 +1075,7 @@ void test_vluxseg6ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_m( @@ -1096,6 
+1096,6 @@ void test_vluxseg6ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c index 9abb78d27141..6c1160bc4f93 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg6ei8.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2( @@ -46,7 +46,7 @@ void test_vluxseg6ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1( @@ -67,7 +67,7 @@ void test_vluxseg6ei8_v_f16mf2(vfloat16mf2_t 
*v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2( @@ -88,7 +88,7 @@ void test_vluxseg6ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1( @@ -109,7 +109,7 @@ void test_vluxseg6ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1( @@ -130,7 +130,7 @@ void test_vluxseg6ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8( @@ -151,7 +151,7 @@ void test_vluxseg6ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4( @@ -172,7 +172,7 @@ void test_vluxseg6ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2( @@ -193,7 +193,7 @@ void test_vluxseg6ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1( @@ -214,7 +214,7 @@ void test_vluxseg6ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return 
__riscv_vluxseg6ei8_v_i8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4( @@ -235,7 +235,7 @@ void test_vluxseg6ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2( @@ -256,7 +256,7 @@ void test_vluxseg6ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1( @@ -277,7 +277,7 @@ void test_vluxseg6ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2( @@ -298,7 +298,7 @@ void test_vluxseg6ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, vuint8mf8_t bindex, size_t 
vl) { - return vluxseg6ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1( @@ -319,7 +319,7 @@ void test_vluxseg6ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1( @@ -340,7 +340,7 @@ void test_vluxseg6ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8( @@ -361,7 +361,7 @@ void test_vluxseg6ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4( @@ -382,7 +382,7 @@ void test_vluxseg6ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, 
vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2( @@ -403,7 +403,7 @@ void test_vluxseg6ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1( @@ -424,7 +424,7 @@ void test_vluxseg6ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4( @@ -445,7 +445,7 @@ void test_vluxseg6ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2( @@ -466,7 +466,7 @@ void test_vluxseg6ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1( @@ -487,7 +487,7 @@ void test_vluxseg6ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2( @@ -508,7 +508,7 @@ void test_vluxseg6ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1( @@ -529,7 +529,7 @@ void test_vluxseg6ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1( @@ -550,7 +550,7 @@ void 
test_vluxseg6ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_m( @@ -571,7 +571,7 @@ void test_vluxseg6ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_m( @@ -592,7 +592,7 @@ void test_vluxseg6ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_m( @@ -613,7 +613,7 @@ void test_vluxseg6ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return 
vluxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_m( @@ -634,7 +634,7 @@ void test_vluxseg6ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_m( @@ -655,7 +655,7 @@ void test_vluxseg6ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_m( @@ -676,7 +676,7 @@ void test_vluxseg6ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_m( @@ -697,7 +697,7 @@ void test_vluxseg6ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, 
vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_m( @@ -718,7 +718,7 @@ void test_vluxseg6ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_m( @@ -739,7 +739,7 @@ void test_vluxseg6ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_m( @@ -760,7 +760,7 @@ void test_vluxseg6ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8m1_m(v0, 
v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_m( @@ -781,7 +781,7 @@ void test_vluxseg6ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_m( @@ -802,7 +802,7 @@ void test_vluxseg6ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_m( @@ -823,7 +823,7 @@ void test_vluxseg6ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_m( @@ -844,7 +844,7 @@ void test_vluxseg6ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t 
*v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_m( @@ -865,7 +865,7 @@ void test_vluxseg6ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_m( @@ -886,7 +886,7 @@ void test_vluxseg6ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_m( @@ -907,7 +907,7 @@ void test_vluxseg6ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_m( @@ -928,7 
+928,7 @@ void test_vluxseg6ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_m( @@ -949,7 +949,7 @@ void test_vluxseg6ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_m( @@ -970,7 +970,7 @@ void test_vluxseg6ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_m( @@ -991,7 +991,7 @@ void test_vluxseg6ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - 
return vluxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_m( @@ -1012,7 +1012,7 @@ void test_vluxseg6ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_m( @@ -1033,7 +1033,7 @@ void test_vluxseg6ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_m( @@ -1054,7 +1054,7 @@ void test_vluxseg6ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_m( @@ -1075,7 +1075,7 @@ void test_vluxseg6ei8_v_u32mf2_m(vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_m( @@ -1096,6 +1096,6 @@ void test_vluxseg6ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c index 5c68e7c95bbf..0ee1f97e5f3d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei16.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2( @@ -50,7 +50,7 @@ void test_vluxseg7ei16_v_f16mf4(vfloat16mf4_t *v0, 
vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1( @@ -73,7 +73,7 @@ void test_vluxseg7ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2( @@ -96,7 +96,7 @@ void test_vluxseg7ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1( @@ -119,7 +119,7 @@ void test_vluxseg7ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f32m1(v0, v1, 
v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1( @@ -142,7 +142,7 @@ void test_vluxseg7ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8( @@ -165,7 +165,7 @@ void test_vluxseg7ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4( @@ -188,7 +188,7 @@ void test_vluxseg7ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2( @@ -211,7 +211,7 @@ void test_vluxseg7ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf2(vint8mf2_t *v0, 
vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1( @@ -234,7 +234,7 @@ void test_vluxseg7ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4( @@ -257,7 +257,7 @@ void test_vluxseg7ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2( @@ -280,7 +280,7 @@ void test_vluxseg7ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg7ei16_v_i16m1( @@ -303,7 +303,7 @@ void test_vluxseg7ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2( @@ -326,7 +326,7 @@ void test_vluxseg7ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1( @@ -349,7 +349,7 @@ void test_vluxseg7ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1( @@ -372,7 +372,7 @@ void test_vluxseg7ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint16mf4_t bindex, size_t vl) 
{ - return vluxseg7ei16_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8( @@ -395,7 +395,7 @@ void test_vluxseg7ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4( @@ -418,7 +418,7 @@ void test_vluxseg7ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2( @@ -441,7 +441,7 @@ void test_vluxseg7ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1( @@ -464,7 +464,7 @@ void test_vluxseg7ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // 
void test_vluxseg7ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4( @@ -487,7 +487,7 @@ void test_vluxseg7ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2( @@ -510,7 +510,7 @@ void test_vluxseg7ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1( @@ -533,7 +533,7 @@ void test_vluxseg7ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16m1(v0, v1, v2, 
v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2( @@ -556,7 +556,7 @@ void test_vluxseg7ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1( @@ -579,7 +579,7 @@ void test_vluxseg7ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1( @@ -602,7 +602,7 @@ void test_vluxseg7ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_m( @@ -625,7 +625,7 @@ void test_vluxseg7ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, 
vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_m( @@ -648,7 +648,7 @@ void test_vluxseg7ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_m( @@ -671,7 +671,7 @@ void test_vluxseg7ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_m( @@ -694,7 +694,7 @@ void test_vluxseg7ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, 
base, bindex, vl); + return __riscv_vluxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_m( @@ -717,7 +717,7 @@ void test_vluxseg7ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_m( @@ -740,7 +740,7 @@ void test_vluxseg7ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_m( @@ -763,7 +763,7 @@ void test_vluxseg7ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_m( @@ -786,7 +786,7 @@ void test_vluxseg7ei16_v_i8mf8_m(vint8mf8_t *v0, 
vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_m( @@ -809,7 +809,7 @@ void test_vluxseg7ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_m( @@ -832,7 +832,7 @@ void test_vluxseg7ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_m( @@ -855,7 +855,7 @@ void test_vluxseg7ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, 
size_t vl) { - return vluxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_m( @@ -878,7 +878,7 @@ void test_vluxseg7ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_m( @@ -901,7 +901,7 @@ void test_vluxseg7ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_m( @@ -924,7 +924,7 @@ void test_vluxseg7ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg7ei16_v_i32m1_m( @@ -947,7 +947,7 @@ void test_vluxseg7ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_m( @@ -970,7 +970,7 @@ void test_vluxseg7ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_m( @@ -993,7 +993,7 @@ void test_vluxseg7ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_m( @@ -1016,7 +1016,7 @@ void test_vluxseg7ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, 
vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_m( @@ -1039,7 +1039,7 @@ void test_vluxseg7ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_m( @@ -1062,7 +1062,7 @@ void test_vluxseg7ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_m( @@ -1085,7 +1085,7 @@ void test_vluxseg7ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return 
__riscv_vluxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_m( @@ -1108,7 +1108,7 @@ void test_vluxseg7ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_m( @@ -1131,7 +1131,7 @@ void test_vluxseg7ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_m( @@ -1154,7 +1154,7 @@ void test_vluxseg7ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_m( @@ -1177,7 +1177,7 @@ void test_vluxseg7ei16_v_u32mf2_m(vuint32mf2_t 
*v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_m( @@ -1200,6 +1200,6 @@ void test_vluxseg7ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c index 01e2ea3c579f..5be02f168b20 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei32.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2( @@ 
-50,7 +50,7 @@ void test_vluxseg7ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1( @@ -73,7 +73,7 @@ void test_vluxseg7ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2( @@ -96,7 +96,7 @@ void test_vluxseg7ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1( @@ -119,7 +119,7 @@ void test_vluxseg7ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, 
vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1( @@ -142,7 +142,7 @@ void test_vluxseg7ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8( @@ -165,7 +165,7 @@ void test_vluxseg7ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4( @@ -188,7 +188,7 @@ void test_vluxseg7ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2( @@ -211,7 +211,7 @@ void test_vluxseg7ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // 
CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1( @@ -234,7 +234,7 @@ void test_vluxseg7ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4( @@ -257,7 +257,7 @@ void test_vluxseg7ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2( @@ -280,7 +280,7 @@ void test_vluxseg7ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16mf2(v0, v1, v2, v3, v4, 
v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1( @@ -303,7 +303,7 @@ void test_vluxseg7ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2( @@ -326,7 +326,7 @@ void test_vluxseg7ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1( @@ -349,7 +349,7 @@ void test_vluxseg7ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1( @@ -372,7 +372,7 @@ void test_vluxseg7ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, 
const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8( @@ -395,7 +395,7 @@ void test_vluxseg7ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4( @@ -418,7 +418,7 @@ void test_vluxseg7ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2( @@ -441,7 +441,7 @@ void test_vluxseg7ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1( @@ -464,7 +464,7 @@ void test_vluxseg7ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t 
*v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4( @@ -487,7 +487,7 @@ void test_vluxseg7ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2( @@ -510,7 +510,7 @@ void test_vluxseg7ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1( @@ -533,7 +533,7 @@ void test_vluxseg7ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); 
+ return __riscv_vluxseg7ei32_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2( @@ -556,7 +556,7 @@ void test_vluxseg7ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1( @@ -579,7 +579,7 @@ void test_vluxseg7ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1( @@ -602,7 +602,7 @@ void test_vluxseg7ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_m( @@ -625,7 +625,7 @@ void test_vluxseg7ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, 
vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_m( @@ -648,7 +648,7 @@ void test_vluxseg7ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_m( @@ -671,7 +671,7 @@ void test_vluxseg7ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_m( @@ -694,7 +694,7 @@ void test_vluxseg7ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return 
vluxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_m( @@ -717,7 +717,7 @@ void test_vluxseg7ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_m( @@ -740,7 +740,7 @@ void test_vluxseg7ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_m( @@ -763,7 +763,7 @@ void test_vluxseg7ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_m( @@ -786,7 
+786,7 @@ void test_vluxseg7ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_m( @@ -809,7 +809,7 @@ void test_vluxseg7ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_m( @@ -832,7 +832,7 @@ void test_vluxseg7ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_m( @@ -855,7 +855,7 @@ void test_vluxseg7ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, 
vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_m( @@ -878,7 +878,7 @@ void test_vluxseg7ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_m( @@ -901,7 +901,7 @@ void test_vluxseg7ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_m( @@ -924,7 +924,7 @@ void test_vluxseg7ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_m( @@ -947,7 +947,7 @@ void test_vluxseg7ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_m( @@ -970,7 +970,7 @@ void test_vluxseg7ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_m( @@ -993,7 +993,7 @@ void test_vluxseg7ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_m( @@ -1016,7 +1016,7 @@ void test_vluxseg7ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, 
vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_m( @@ -1039,7 +1039,7 @@ void test_vluxseg7ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_m( @@ -1062,7 +1062,7 @@ void test_vluxseg7ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_m( @@ -1085,7 +1085,7 @@ void test_vluxseg7ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, 
mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_m( @@ -1108,7 +1108,7 @@ void test_vluxseg7ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_m( @@ -1131,7 +1131,7 @@ void test_vluxseg7ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_m( @@ -1154,7 +1154,7 @@ void test_vluxseg7ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_m( @@ -1177,7 +1177,7 @@ void 
test_vluxseg7ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_m( @@ -1200,6 +1200,6 @@ void test_vluxseg7ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c index a10dbd67c555..585b1b63e7e0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei64.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2( @@ -50,7 +50,7 @@ void test_vluxseg7ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1( @@ -73,7 +73,7 @@ void test_vluxseg7ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2( @@ -96,7 +96,7 @@ void test_vluxseg7ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1( @@ -119,7 +119,7 @@ void test_vluxseg7ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t 
*v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1( @@ -142,7 +142,7 @@ void test_vluxseg7ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8( @@ -165,7 +165,7 @@ void test_vluxseg7ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4( @@ -188,7 +188,7 @@ void test_vluxseg7ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2( @@ -211,7 +211,7 @@ void test_vluxseg7ei64_v_i8mf4(vint8mf4_t *v0, 
vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1( @@ -234,7 +234,7 @@ void test_vluxseg7ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4( @@ -257,7 +257,7 @@ void test_vluxseg7ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2( @@ -280,7 +280,7 @@ void test_vluxseg7ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return 
__riscv_vluxseg7ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1( @@ -303,7 +303,7 @@ void test_vluxseg7ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2( @@ -326,7 +326,7 @@ void test_vluxseg7ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1( @@ -349,7 +349,7 @@ void test_vluxseg7ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1( @@ -372,7 +372,7 @@ void test_vluxseg7ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, 
vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8( @@ -395,7 +395,7 @@ void test_vluxseg7ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4( @@ -418,7 +418,7 @@ void test_vluxseg7ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2( @@ -441,7 +441,7 @@ void test_vluxseg7ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1( @@ -464,7 +464,7 @@ void 
test_vluxseg7ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4( @@ -487,7 +487,7 @@ void test_vluxseg7ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2( @@ -510,7 +510,7 @@ void test_vluxseg7ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1( @@ -533,7 +533,7 @@ void test_vluxseg7ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return 
vluxseg7ei64_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2( @@ -556,7 +556,7 @@ void test_vluxseg7ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1( @@ -579,7 +579,7 @@ void test_vluxseg7ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1( @@ -602,7 +602,7 @@ void test_vluxseg7ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_m( @@ -625,7 +625,7 @@ void test_vluxseg7ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret 
void // void test_vluxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_m( @@ -648,7 +648,7 @@ void test_vluxseg7ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_m( @@ -671,7 +671,7 @@ void test_vluxseg7ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_m( @@ -694,7 +694,7 @@ void test_vluxseg7ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, 
const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_m( @@ -717,7 +717,7 @@ void test_vluxseg7ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_m( @@ -740,7 +740,7 @@ void test_vluxseg7ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_m( @@ -763,7 +763,7 @@ void test_vluxseg7ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_m( @@ -786,7 +786,7 @@ void test_vluxseg7ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_m( @@ -809,7 +809,7 @@ void test_vluxseg7ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_m( @@ -832,7 +832,7 @@ void test_vluxseg7ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_m( @@ -855,7 +855,7 @@ void test_vluxseg7ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, 
vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_m( @@ -878,7 +878,7 @@ void test_vluxseg7ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_m( @@ -901,7 +901,7 @@ void test_vluxseg7ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_m( @@ -924,7 +924,7 @@ void test_vluxseg7ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return 
__riscv_vluxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_m( @@ -947,7 +947,7 @@ void test_vluxseg7ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_m( @@ -970,7 +970,7 @@ void test_vluxseg7ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_m( @@ -993,7 +993,7 @@ void test_vluxseg7ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_m( @@ -1016,7 +1016,7 @@ void test_vluxseg7ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // 
CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_m( @@ -1039,7 +1039,7 @@ void test_vluxseg7ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_m( @@ -1062,7 +1062,7 @@ void test_vluxseg7ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_m( @@ -1085,7 +1085,7 @@ void test_vluxseg7ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, 
size_t vl) { - return vluxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_m( @@ -1108,7 +1108,7 @@ void test_vluxseg7ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_m( @@ -1131,7 +1131,7 @@ void test_vluxseg7ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_m( @@ -1154,7 +1154,7 @@ void test_vluxseg7ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_m( @@ -1177,7 +1177,7 @@ void test_vluxseg7ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_m( @@ -1200,6 +1200,6 @@ void test_vluxseg7ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c index 43b859834a39..fa01ba5e5f86 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg7ei8.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return 
__riscv_vluxseg7ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2( @@ -50,7 +50,7 @@ void test_vluxseg7ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1( @@ -73,7 +73,7 @@ void test_vluxseg7ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2( @@ -96,7 +96,7 @@ void test_vluxseg7ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1( @@ -119,7 +119,7 @@ void test_vluxseg7ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, 
vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1( @@ -142,7 +142,7 @@ void test_vluxseg7ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8( @@ -165,7 +165,7 @@ void test_vluxseg7ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4( @@ -188,7 +188,7 @@ void test_vluxseg7ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2( @@ -211,7 +211,7 @@ 
void test_vluxseg7ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1( @@ -234,7 +234,7 @@ void test_vluxseg7ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4( @@ -257,7 +257,7 @@ void test_vluxseg7ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2( @@ -280,7 +280,7 @@ void test_vluxseg7ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, 
bindex, vl); + return __riscv_vluxseg7ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1( @@ -303,7 +303,7 @@ void test_vluxseg7ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2( @@ -326,7 +326,7 @@ void test_vluxseg7ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1( @@ -349,7 +349,7 @@ void test_vluxseg7ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1( @@ -372,7 +372,7 @@ void test_vluxseg7ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t 
*v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8( @@ -395,7 +395,7 @@ void test_vluxseg7ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4( @@ -418,7 +418,7 @@ void test_vluxseg7ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2( @@ -441,7 +441,7 @@ void test_vluxseg7ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1( @@ -464,7 +464,7 @@ void 
test_vluxseg7ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4( @@ -487,7 +487,7 @@ void test_vluxseg7ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2( @@ -510,7 +510,7 @@ void test_vluxseg7ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1( @@ -533,7 +533,7 @@ void test_vluxseg7ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u16m1(v0, v1, 
v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2( @@ -556,7 +556,7 @@ void test_vluxseg7ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1( @@ -579,7 +579,7 @@ void test_vluxseg7ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1( @@ -602,7 +602,7 @@ void test_vluxseg7ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_m( @@ -625,7 +625,7 @@ void test_vluxseg7ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vluxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_m( @@ -648,7 +648,7 @@ void test_vluxseg7ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_m( @@ -671,7 +671,7 @@ void test_vluxseg7ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_m( @@ -694,7 +694,7 @@ void test_vluxseg7ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, vuint8mf8_t 
bindex, size_t vl) { - return vluxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_m( @@ -717,7 +717,7 @@ void test_vluxseg7ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_m( @@ -740,7 +740,7 @@ void test_vluxseg7ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_m( @@ -763,7 +763,7 @@ void test_vluxseg7ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_m( 
@@ -786,7 +786,7 @@ void test_vluxseg7ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_m( @@ -809,7 +809,7 @@ void test_vluxseg7ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_m( @@ -832,7 +832,7 @@ void test_vluxseg7ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_m( @@ -855,7 +855,7 @@ void test_vluxseg7ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, 
vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_m( @@ -878,7 +878,7 @@ void test_vluxseg7ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_m( @@ -901,7 +901,7 @@ void test_vluxseg7ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_m( @@ -924,7 +924,7 @@ void test_vluxseg7ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_m( @@ -947,7 +947,7 @@ void test_vluxseg7ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_m( @@ -970,7 +970,7 @@ void test_vluxseg7ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_m( @@ -993,7 +993,7 @@ void test_vluxseg7ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_m( @@ -1016,7 +1016,7 @@ void test_vluxseg7ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, 
vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_m( @@ -1039,7 +1039,7 @@ void test_vluxseg7ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_m( @@ -1062,7 +1062,7 @@ void test_vluxseg7ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_m( @@ -1085,7 +1085,7 @@ void test_vluxseg7ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return 
__riscv_vluxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_m( @@ -1108,7 +1108,7 @@ void test_vluxseg7ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_m( @@ -1131,7 +1131,7 @@ void test_vluxseg7ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_m( @@ -1154,7 +1154,7 @@ void test_vluxseg7ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_m( @@ -1177,7 +1177,7 @@ void test_vluxseg7ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t 
*v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_m( @@ -1200,6 +1200,6 @@ void test_vluxseg7ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c index 429514eae9cf..2a8e7ebcb389 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei16.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2( @@ 
-54,7 +54,7 @@ void test_vluxseg8ei16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1( @@ -79,7 +79,7 @@ void test_vluxseg8ei16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2( @@ -104,7 +104,7 @@ void test_vluxseg8ei16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1( @@ -129,7 +129,7 @@ void test_vluxseg8ei16_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t 
*v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1( @@ -154,7 +154,7 @@ void test_vluxseg8ei16_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8( @@ -179,7 +179,7 @@ void test_vluxseg8ei16_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4( @@ -204,7 +204,7 @@ void test_vluxseg8ei16_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2( @@ -229,7 +229,7 @@ void test_vluxseg8ei16_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1( @@ -254,7 +254,7 @@ void test_vluxseg8ei16_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4( @@ -279,7 +279,7 @@ void test_vluxseg8ei16_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2( @@ -304,7 +304,7 @@ void test_vluxseg8ei16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t 
*v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1( @@ -329,7 +329,7 @@ void test_vluxseg8ei16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2( @@ -354,7 +354,7 @@ void test_vluxseg8ei16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1( @@ -379,7 +379,7 @@ void test_vluxseg8ei16_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1( @@ -404,7 +404,7 @@ void test_vluxseg8ei16_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8( @@ -429,7 +429,7 @@ void test_vluxseg8ei16_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4( @@ -454,7 +454,7 @@ void test_vluxseg8ei16_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2( @@ -479,7 +479,7 @@ void test_vluxseg8ei16_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t 
*v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1( @@ -504,7 +504,7 @@ void test_vluxseg8ei16_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4( @@ -529,7 +529,7 @@ void test_vluxseg8ei16_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2( @@ -554,7 +554,7 @@ void test_vluxseg8ei16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return 
__riscv_vluxseg8ei16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1( @@ -579,7 +579,7 @@ void test_vluxseg8ei16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2( @@ -604,7 +604,7 @@ void test_vluxseg8ei16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1( @@ -629,7 +629,7 @@ void test_vluxseg8ei16_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1( @@ -654,7 +654,7 @@ void test_vluxseg8ei16_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret 
void // void test_vluxseg8ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_m( @@ -679,7 +679,7 @@ void test_vluxseg8ei16_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_m( @@ -704,7 +704,7 @@ void test_vluxseg8ei16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_m( @@ -729,7 +729,7 @@ void test_vluxseg8ei16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, 
vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_m( @@ -754,7 +754,7 @@ void test_vluxseg8ei16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_m( @@ -779,7 +779,7 @@ void test_vluxseg8ei16_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_m( @@ -804,7 +804,7 @@ void test_vluxseg8ei16_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint16mf4_t bindex, size_t vl) { - return 
vluxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_m( @@ -829,7 +829,7 @@ void test_vluxseg8ei16_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_m( @@ -854,7 +854,7 @@ void test_vluxseg8ei16_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_m( @@ -879,7 +879,7 @@ void test_vluxseg8ei16_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_m( @@ -904,7 +904,7 @@ void test_vluxseg8ei16_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_m( @@ -929,7 +929,7 @@ void test_vluxseg8ei16_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_m( @@ -954,7 +954,7 @@ void test_vluxseg8ei16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_m( @@ -979,7 +979,7 @@ void test_vluxseg8ei16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // 
void test_vluxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_m( @@ -1004,7 +1004,7 @@ void test_vluxseg8ei16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_m( @@ -1029,7 +1029,7 @@ void test_vluxseg8ei16_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_m( @@ -1054,7 +1054,7 @@ void test_vluxseg8ei16_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t 
mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_m( @@ -1079,7 +1079,7 @@ void test_vluxseg8ei16_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_m( @@ -1104,7 +1104,7 @@ void test_vluxseg8ei16_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_m( @@ -1129,7 +1129,7 @@ void test_vluxseg8ei16_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return 
__riscv_vluxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_m( @@ -1154,7 +1154,7 @@ void test_vluxseg8ei16_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_m( @@ -1179,7 +1179,7 @@ void test_vluxseg8ei16_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_m( @@ -1204,7 +1204,7 @@ void test_vluxseg8ei16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_m( @@ 
-1229,7 +1229,7 @@ void test_vluxseg8ei16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_m( @@ -1254,7 +1254,7 @@ void test_vluxseg8ei16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_m( @@ -1279,7 +1279,7 @@ void test_vluxseg8ei16_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_m( @@ -1304,6 +1304,6 @@ void test_vluxseg8ei16_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei16_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c index 5a67a70ba37c..a087e7b52ccd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei32.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2( @@ -54,7 +54,7 @@ void test_vluxseg8ei32_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1( @@ -79,7 +79,7 @@ 
void test_vluxseg8ei32_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2( @@ -104,7 +104,7 @@ void test_vluxseg8ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1( @@ -129,7 +129,7 @@ void test_vluxseg8ei32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1( @@ -154,7 +154,7 @@ void test_vluxseg8ei32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, 
vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8( @@ -179,7 +179,7 @@ void test_vluxseg8ei32_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4( @@ -204,7 +204,7 @@ void test_vluxseg8ei32_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2( @@ -229,7 +229,7 @@ void test_vluxseg8ei32_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei32_v_i8m1( @@ -254,7 +254,7 @@ void test_vluxseg8ei32_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4( @@ -279,7 +279,7 @@ void test_vluxseg8ei32_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2( @@ -304,7 +304,7 @@ void test_vluxseg8ei32_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1( @@ -329,7 +329,7 @@ void test_vluxseg8ei32_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t 
*v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2( @@ -354,7 +354,7 @@ void test_vluxseg8ei32_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1( @@ -379,7 +379,7 @@ void test_vluxseg8ei32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1( @@ -404,7 +404,7 @@ void test_vluxseg8ei32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei32_v_u8mf8( @@ -429,7 +429,7 @@ void test_vluxseg8ei32_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4( @@ -454,7 +454,7 @@ void test_vluxseg8ei32_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2( @@ -479,7 +479,7 @@ void test_vluxseg8ei32_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1( @@ -504,7 +504,7 @@ void test_vluxseg8ei32_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, 
vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4( @@ -529,7 +529,7 @@ void test_vluxseg8ei32_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2( @@ -554,7 +554,7 @@ void test_vluxseg8ei32_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1( @@ -579,7 +579,7 @@ void test_vluxseg8ei32_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16m1(v0, v1, v2, v3, v4, v5, v6, 
v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2( @@ -604,7 +604,7 @@ void test_vluxseg8ei32_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1( @@ -629,7 +629,7 @@ void test_vluxseg8ei32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1( @@ -654,7 +654,7 @@ void test_vluxseg8ei32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_m( @@ -679,7 +679,7 @@ void test_vluxseg8ei32_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16mf4_m(vfloat16mf4_t 
*v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_m( @@ -704,7 +704,7 @@ void test_vluxseg8ei32_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_m( @@ -729,7 +729,7 @@ void test_vluxseg8ei32_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_m( @@ -754,7 +754,7 @@ void test_vluxseg8ei32_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t 
*v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_m( @@ -779,7 +779,7 @@ void test_vluxseg8ei32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_m( @@ -804,7 +804,7 @@ void test_vluxseg8ei32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_m( @@ -829,7 +829,7 @@ void test_vluxseg8ei32_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_m( @@ -854,7 +854,7 @@ void test_vluxseg8ei32_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_m( @@ -879,7 +879,7 @@ void test_vluxseg8ei32_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_m( @@ -904,7 +904,7 @@ void test_vluxseg8ei32_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_m( @@ -929,7 +929,7 @@ void 
test_vluxseg8ei32_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_m( @@ -954,7 +954,7 @@ void test_vluxseg8ei32_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_m( @@ -979,7 +979,7 @@ void test_vluxseg8ei32_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_m( @@ -1004,7 +1004,7 @@ void test_vluxseg8ei32_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, 
vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_m( @@ -1029,7 +1029,7 @@ void test_vluxseg8ei32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_m( @@ -1054,7 +1054,7 @@ void test_vluxseg8ei32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_m( @@ -1079,7 +1079,7 @@ void test_vluxseg8ei32_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint32mf2_t 
bindex, size_t vl) { - return vluxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_m( @@ -1104,7 +1104,7 @@ void test_vluxseg8ei32_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_m( @@ -1129,7 +1129,7 @@ void test_vluxseg8ei32_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_m( @@ -1154,7 +1154,7 @@ void test_vluxseg8ei32_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, 
v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_m( @@ -1179,7 +1179,7 @@ void test_vluxseg8ei32_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_m( @@ -1204,7 +1204,7 @@ void test_vluxseg8ei32_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_m( @@ -1229,7 +1229,7 @@ void test_vluxseg8ei32_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_m( @@ -1254,7 +1254,7 @@ void 
test_vluxseg8ei32_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_m( @@ -1279,7 +1279,7 @@ void test_vluxseg8ei32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_m( @@ -1304,6 +1304,6 @@ void test_vluxseg8ei32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c index 
e954ddc75a80..b05e9b3c9804 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei64.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2( @@ -54,7 +54,7 @@ void test_vluxseg8ei64_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1( @@ -79,7 +79,7 @@ void test_vluxseg8ei64_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2( @@ -104,7 +104,7 @@ void test_vluxseg8ei64_v_f16m1(vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1( @@ -129,7 +129,7 @@ void test_vluxseg8ei64_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1( @@ -154,7 +154,7 @@ void test_vluxseg8ei64_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8( @@ -179,7 +179,7 @@ void test_vluxseg8ei64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, 
vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4( @@ -204,7 +204,7 @@ void test_vluxseg8ei64_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2( @@ -229,7 +229,7 @@ void test_vluxseg8ei64_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1( @@ -254,7 +254,7 @@ void test_vluxseg8ei64_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4( @@ -279,7 +279,7 @@ void 
test_vluxseg8ei64_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2( @@ -304,7 +304,7 @@ void test_vluxseg8ei64_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1( @@ -329,7 +329,7 @@ void test_vluxseg8ei64_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2( @@ -354,7 +354,7 @@ void test_vluxseg8ei64_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t 
*v7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1( @@ -379,7 +379,7 @@ void test_vluxseg8ei64_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1( @@ -404,7 +404,7 @@ void test_vluxseg8ei64_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8( @@ -429,7 +429,7 @@ void test_vluxseg8ei64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4( @@ 
-454,7 +454,7 @@ void test_vluxseg8ei64_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2( @@ -479,7 +479,7 @@ void test_vluxseg8ei64_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1( @@ -504,7 +504,7 @@ void test_vluxseg8ei64_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4( @@ -529,7 +529,7 @@ void test_vluxseg8ei64_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t 
*v6, vuint16mf4_t *v7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2( @@ -554,7 +554,7 @@ void test_vluxseg8ei64_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1( @@ -579,7 +579,7 @@ void test_vluxseg8ei64_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2( @@ -604,7 +604,7 @@ void test_vluxseg8ei64_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1( @@ -629,7 +629,7 @@ void test_vluxseg8ei64_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1( @@ -654,7 +654,7 @@ void test_vluxseg8ei64_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_m( @@ -679,7 +679,7 @@ void test_vluxseg8ei64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_m( @@ -704,7 +704,7 @@ void test_vluxseg8ei64_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_m( @@ -729,7 +729,7 @@ void test_vluxseg8ei64_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_m( @@ -754,7 +754,7 @@ void test_vluxseg8ei64_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_m( @@ -779,7 +779,7 @@ void test_vluxseg8ei64_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, 
vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_m( @@ -804,7 +804,7 @@ void test_vluxseg8ei64_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_m( @@ -829,7 +829,7 @@ void test_vluxseg8ei64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_m( @@ -854,7 +854,7 @@ void test_vluxseg8ei64_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, 
v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_m( @@ -879,7 +879,7 @@ void test_vluxseg8ei64_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_m( @@ -904,7 +904,7 @@ void test_vluxseg8ei64_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_m( @@ -929,7 +929,7 @@ void test_vluxseg8ei64_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_m( @@ -954,7 
+954,7 @@ void test_vluxseg8ei64_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_m( @@ -979,7 +979,7 @@ void test_vluxseg8ei64_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_m( @@ -1004,7 +1004,7 @@ void test_vluxseg8ei64_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_m( @@ -1029,7 +1029,7 @@ void test_vluxseg8ei64_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i32m1_m(vint32m1_t 
*v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_m( @@ -1054,7 +1054,7 @@ void test_vluxseg8ei64_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_m( @@ -1079,7 +1079,7 @@ void test_vluxseg8ei64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_m( @@ -1104,7 +1104,7 @@ void test_vluxseg8ei64_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint64m2_t 
bindex, size_t vl) { - return vluxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_m( @@ -1129,7 +1129,7 @@ void test_vluxseg8ei64_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_m( @@ -1154,7 +1154,7 @@ void test_vluxseg8ei64_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_m( @@ -1179,7 +1179,7 @@ void test_vluxseg8ei64_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16mf4_m(v0, v1, v2, 
v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_m( @@ -1204,7 +1204,7 @@ void test_vluxseg8ei64_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_m( @@ -1229,7 +1229,7 @@ void test_vluxseg8ei64_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_m( @@ -1254,7 +1254,7 @@ void test_vluxseg8ei64_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_m( @@ -1279,7 +1279,7 @@ void 
test_vluxseg8ei64_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_m( @@ -1304,6 +1304,6 @@ void test_vluxseg8ei64_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c index 657328030659..95ff59fa3a3d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vluxseg8ei8.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return 
__riscv_vluxseg8ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2( @@ -54,7 +54,7 @@ void test_vluxseg8ei8_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1( @@ -79,7 +79,7 @@ void test_vluxseg8ei8_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2( @@ -104,7 +104,7 @@ void test_vluxseg8ei8_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1( @@ -129,7 +129,7 @@ void test_vluxseg8ei8_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2 // 
CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1( @@ -154,7 +154,7 @@ void test_vluxseg8ei8_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8( @@ -179,7 +179,7 @@ void test_vluxseg8ei8_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4( @@ -204,7 +204,7 @@ void test_vluxseg8ei8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf4(v0, v1, v2, 
v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2( @@ -229,7 +229,7 @@ void test_vluxseg8ei8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1( @@ -254,7 +254,7 @@ void test_vluxseg8ei8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4( @@ -279,7 +279,7 @@ void test_vluxseg8ei8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2( @@ -304,7 +304,7 @@ void test_vluxseg8ei8_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2 // CHECK-RV64-NEXT: ret void // 
void test_vluxseg8ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1( @@ -329,7 +329,7 @@ void test_vluxseg8ei8_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2( @@ -354,7 +354,7 @@ void test_vluxseg8ei8_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1( @@ -379,7 +379,7 @@ void test_vluxseg8ei8_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, 
vl); + return __riscv_vluxseg8ei8_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1( @@ -404,7 +404,7 @@ void test_vluxseg8ei8_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8( @@ -429,7 +429,7 @@ void test_vluxseg8ei8_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4( @@ -454,7 +454,7 @@ void test_vluxseg8ei8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2( @@ -479,7 +479,7 @@ void test_vluxseg8ei8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1( @@ -504,7 +504,7 @@ void test_vluxseg8ei8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4( @@ -529,7 +529,7 @@ void test_vluxseg8ei8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2( @@ -554,7 +554,7 @@ void test_vluxseg8ei8_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, 
base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1( @@ -579,7 +579,7 @@ void test_vluxseg8ei8_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2( @@ -604,7 +604,7 @@ void test_vluxseg8ei8_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1( @@ -629,7 +629,7 @@ void test_vluxseg8ei8_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1( @@ -654,7 +654,7 @@ void test_vluxseg8ei8_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, // 
CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_m( @@ -679,7 +679,7 @@ void test_vluxseg8ei8_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_m( @@ -704,7 +704,7 @@ void test_vluxseg8ei8_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_m( @@ -729,7 +729,7 @@ void test_vluxseg8ei8_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, 
vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_m( @@ -754,7 +754,7 @@ void test_vluxseg8ei8_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_m( @@ -779,7 +779,7 @@ void test_vluxseg8ei8_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_m( @@ -804,7 +804,7 @@ void test_vluxseg8ei8_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f64m1_m(v0, v1, 
v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_m( @@ -829,7 +829,7 @@ void test_vluxseg8ei8_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_m( @@ -854,7 +854,7 @@ void test_vluxseg8ei8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_m( @@ -879,7 +879,7 @@ void test_vluxseg8ei8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_m( @@ -904,7 
+904,7 @@ void test_vluxseg8ei8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_m( @@ -929,7 +929,7 @@ void test_vluxseg8ei8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_m( @@ -954,7 +954,7 @@ void test_vluxseg8ei8_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_m( @@ -979,7 +979,7 @@ void test_vluxseg8ei8_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_m( @@ -1004,7 +1004,7 @@ void test_vluxseg8ei8_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_m( @@ -1029,7 +1029,7 @@ void test_vluxseg8ei8_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_m( @@ -1054,7 +1054,7 @@ void test_vluxseg8ei8_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return 
vluxseg8ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_m( @@ -1079,7 +1079,7 @@ void test_vluxseg8ei8_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_m( @@ -1104,7 +1104,7 @@ void test_vluxseg8ei8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_m( @@ -1129,7 +1129,7 @@ void test_vluxseg8ei8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_m( @@ -1154,7 +1154,7 @@ void test_vluxseg8ei8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_m( @@ -1179,7 +1179,7 @@ void test_vluxseg8ei8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_m( @@ -1204,7 +1204,7 @@ void test_vluxseg8ei8_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_m( @@ -1229,7 +1229,7 @@ void test_vluxseg8ei8_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ 
// CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_m( @@ -1254,7 +1254,7 @@ void test_vluxseg8ei8_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_m( @@ -1279,7 +1279,7 @@ void test_vluxseg8ei8_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_m( @@ -1304,6 +1304,6 @@ void test_vluxseg8ei8_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, 
vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmacc.c index d3c704cb8701..cdfd9e14ca22 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmacc.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vmacc_vv_i8mf8(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8( @@ -22,7 +22,7 @@ vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vmacc_vx_i8mf8(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4( @@ -31,7 +31,7 @@ vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vmacc_vv_i8mf4(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4( @@ -40,7 +40,7 @@ vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vmacc_vx_i8mf4(vd, rs1, vs2, 
vl); + return __riscv_vmacc_vx_i8mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2( @@ -49,7 +49,7 @@ vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vmacc_vv_i8mf2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2( @@ -58,7 +58,7 @@ vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vmacc_vx_i8mf2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1( @@ -67,7 +67,7 @@ vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vmacc_vv_i8m1(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1( @@ -76,7 +76,7 @@ vint8m1_t test_vmacc_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vmacc_vx_i8m1(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2( @@ -85,7 +85,7 @@ vint8m1_t test_vmacc_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vmacc_vv_i8m2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2( @@ -94,7 +94,7 @@ vint8m2_t test_vmacc_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vmacc_vx_i8m2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4( @@ -103,7 +103,7 @@ vint8m2_t test_vmacc_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vmacc_vv_i8m4(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4( @@ -112,7 +112,7 @@ vint8m4_t test_vmacc_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vmacc_vx_i8m4(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8( @@ -121,7 +121,7 @@ vint8m4_t test_vmacc_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vmacc_vv_i8m8(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8( @@ -130,7 +130,7 @@ vint8m8_t test_vmacc_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vmacc_vx_i8m8(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4( @@ -139,7 +139,7 @@ vint8m8_t test_vmacc_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vmacc_vv_i16mf4(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16mf4(vd, 
vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4( @@ -148,7 +148,7 @@ vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vmacc_vx_i16mf4(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2( @@ -157,7 +157,7 @@ vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vmacc_vv_i16mf2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2( @@ -166,7 +166,7 @@ vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vmacc_vx_i16mf2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1( @@ -175,7 +175,7 @@ vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vmacc_vv_i16m1(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1( @@ -184,7 +184,7 @@ vint16m1_t test_vmacc_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vmacc_vx_i16m1(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2( @@ -193,7 +193,7 @@ vint16m1_t test_vmacc_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t 
vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vmacc_vv_i16m2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2( @@ -202,7 +202,7 @@ vint16m2_t test_vmacc_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vmacc_vx_i16m2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4( @@ -211,7 +211,7 @@ vint16m2_t test_vmacc_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vmacc_vv_i16m4(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4( @@ -220,7 +220,7 @@ vint16m4_t test_vmacc_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vmacc_vx_i16m4(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8( @@ -229,7 +229,7 @@ vint16m4_t test_vmacc_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vmacc_vv_i16m8(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8( @@ -238,7 +238,7 @@ vint16m8_t test_vmacc_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vmacc_vx_i16m8(vd, rs1, 
vs2, vl); + return __riscv_vmacc_vx_i16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2( @@ -247,7 +247,7 @@ vint16m8_t test_vmacc_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vmacc_vv_i32mf2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2( @@ -256,7 +256,7 @@ vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vmacc_vx_i32mf2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1( @@ -265,7 +265,7 @@ vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vmacc_vv_i32m1(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1( @@ -274,7 +274,7 @@ vint32m1_t test_vmacc_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vmacc_vx_i32m1(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2( @@ -283,7 +283,7 @@ vint32m1_t test_vmacc_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vmacc_vv_i32m2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2( @@ -292,7 +292,7 @@ vint32m2_t 
test_vmacc_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vmacc_vx_i32m2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4( @@ -301,7 +301,7 @@ vint32m2_t test_vmacc_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vmacc_vv_i32m4(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4( @@ -310,7 +310,7 @@ vint32m4_t test_vmacc_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vmacc_vx_i32m4(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8( @@ -319,7 +319,7 @@ vint32m4_t test_vmacc_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vmacc_vv_i32m8(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8( @@ -328,7 +328,7 @@ vint32m8_t test_vmacc_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vmacc_vx_i32m8(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1( @@ -337,7 +337,7 @@ vint32m8_t test_vmacc_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, 
vint64m1_t vs2, size_t vl) { - return vmacc_vv_i64m1(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1( @@ -346,7 +346,7 @@ vint64m1_t test_vmacc_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vmacc_vx_i64m1(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2( @@ -355,7 +355,7 @@ vint64m1_t test_vmacc_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vmacc_vv_i64m2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2( @@ -364,7 +364,7 @@ vint64m2_t test_vmacc_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vmacc_vx_i64m2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4( @@ -373,7 +373,7 @@ vint64m2_t test_vmacc_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vmacc_vv_i64m4(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4( @@ -382,7 +382,7 @@ vint64m4_t test_vmacc_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vmacc_vx_i64m4(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8( @@ 
-391,7 +391,7 @@ vint64m4_t test_vmacc_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vmacc_vv_i64m8(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8( @@ -400,7 +400,7 @@ vint64m8_t test_vmacc_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vmacc_vx_i64m8(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8( @@ -409,7 +409,7 @@ vint64m8_t test_vmacc_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vmacc_vv_u8mf8(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8( @@ -418,7 +418,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vmacc_vx_u8mf8(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4( @@ -427,7 +427,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vmacc_vv_u8mf4(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4( @@ -436,7 +436,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vmacc_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vmacc_vx_u8mf4(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2( @@ -445,7 +445,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vmacc_vv_u8mf2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2( @@ -454,7 +454,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vmacc_vx_u8mf2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1( @@ -463,7 +463,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vmacc_vv_u8m1(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1( @@ -472,7 +472,7 @@ vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vmacc_vx_u8m1(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2( @@ -481,7 +481,7 @@ vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vmacc_vv_u8m2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m2(vd, vs1, vs2, vl); } 
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2( @@ -490,7 +490,7 @@ vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vmacc_vx_u8m2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4( @@ -499,7 +499,7 @@ vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vmacc_vv_u8m4(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4( @@ -508,7 +508,7 @@ vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vmacc_vx_u8m4(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8( @@ -517,7 +517,7 @@ vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vmacc_vv_u8m8(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8( @@ -526,7 +526,7 @@ vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vmacc_vx_u8m8(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4( @@ -535,7 +535,7 @@ vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vmacc_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vmacc_vv_u16mf4(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4( @@ -544,7 +544,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vmacc_vx_u16mf4(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2( @@ -553,7 +553,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vmacc_vv_u16mf2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2( @@ -562,7 +562,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vmacc_vx_u16mf2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1( @@ -571,7 +571,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vmacc_vv_u16m1(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1( @@ -580,7 +580,7 @@ vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vmacc_vx_u16m1(vd, rs1, vs2, vl); 
+ return __riscv_vmacc_vx_u16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2( @@ -589,7 +589,7 @@ vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vmacc_vv_u16m2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2( @@ -598,7 +598,7 @@ vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vmacc_vx_u16m2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4( @@ -607,7 +607,7 @@ vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vmacc_vv_u16m4(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4( @@ -616,7 +616,7 @@ vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vmacc_vx_u16m4(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8( @@ -625,7 +625,7 @@ vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vmacc_vv_u16m8(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8( @@ -634,7 +634,7 @@ vuint16m8_t 
test_vmacc_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vmacc_vx_u16m8(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2( @@ -643,7 +643,7 @@ vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vmacc_vv_u32mf2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2( @@ -652,7 +652,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vmacc_vx_u32mf2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1( @@ -661,7 +661,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vmacc_vv_u32m1(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1( @@ -670,7 +670,7 @@ vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vmacc_vx_u32m1(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2( @@ -679,7 +679,7 @@ vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vmacc_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vmacc_vv_u32m2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2( @@ -688,7 +688,7 @@ vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vmacc_vx_u32m2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4( @@ -697,7 +697,7 @@ vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vmacc_vv_u32m4(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4( @@ -706,7 +706,7 @@ vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vmacc_vx_u32m4(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8( @@ -715,7 +715,7 @@ vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vmacc_vv_u32m8(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8( @@ -724,7 +724,7 @@ vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vmacc_vx_u32m8(vd, rs1, vs2, vl); + return 
__riscv_vmacc_vx_u32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1( @@ -733,7 +733,7 @@ vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vmacc_vv_u64m1(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1( @@ -742,7 +742,7 @@ vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vmacc_vx_u64m1(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2( @@ -751,7 +751,7 @@ vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vmacc_vv_u64m2(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2( @@ -760,7 +760,7 @@ vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vmacc_vx_u64m2(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4( @@ -769,7 +769,7 @@ vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vmacc_vv_u64m4(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4( @@ -778,7 +778,7 @@ vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t 
vd, vuint64m4_t vs1, vuint64m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vmacc_vx_u64m4(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8( @@ -787,7 +787,7 @@ vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vmacc_vv_u64m8(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8( @@ -796,7 +796,7 @@ vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vmacc_vx_u64m8(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m( @@ -805,7 +805,7 @@ vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vmacc_vv_i8mf8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m( @@ -814,7 +814,7 @@ vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vmacc_vx_i8mf8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m( @@ -823,7 +823,7 @@ vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t 
test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vmacc_vv_i8mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m( @@ -832,7 +832,7 @@ vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vmacc_vx_i8mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m( @@ -841,7 +841,7 @@ vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vmacc_vv_i8mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m( @@ -850,7 +850,7 @@ vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vmacc_vx_i8mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m( @@ -859,7 +859,7 @@ vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vmacc_vv_i8m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m( @@ -868,7 +868,7 @@ vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vmacc_vx_i8m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m( @@ -877,7 +877,7 @@ vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vmacc_vv_i8m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m( @@ -886,7 +886,7 @@ vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vmacc_vx_i8m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m( @@ -895,7 +895,7 @@ vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vmacc_vv_i8m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m( @@ -904,7 +904,7 @@ vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vmacc_vx_i8m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m( @@ -913,7 +913,7 @@ vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, 
vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vmacc_vv_i8m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m( @@ -922,7 +922,7 @@ vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vmacc_vx_i8m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m( @@ -931,7 +931,7 @@ vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vmacc_vv_i16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m( @@ -940,7 +940,7 @@ vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vmacc_vx_i16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m( @@ -949,7 +949,7 @@ vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vmacc_vv_i16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m( @@ -958,7 +958,7 @@ vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vmacc_vx_i16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m( @@ -967,7 +967,7 @@ vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vmacc_vv_i16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m( @@ -976,7 +976,7 @@ vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vmacc_vx_i16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m( @@ -985,7 +985,7 @@ vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vmacc_vv_i16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m( @@ -994,7 +994,7 @@ vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vmacc_vx_i16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m( @@ -1003,7 +1003,7 @@ vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vmacc_vv_i16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m( @@ -1012,7 +1012,7 @@ vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vmacc_vx_i16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m( @@ -1021,7 +1021,7 @@ vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vmacc_vv_i16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m( @@ -1030,7 +1030,7 @@ vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vmacc_vx_i16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m( @@ -1039,7 +1039,7 @@ vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vmacc_vv_i32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m( @@ -1048,7 +1048,7 @@ vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t v // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vmacc_vx_i32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m( @@ -1057,7 +1057,7 @@ vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vmacc_vv_i32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m( @@ -1066,7 +1066,7 @@ vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vmacc_vx_i32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m( @@ -1075,7 +1075,7 @@ vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vmacc_vv_i32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m( @@ -1084,7 +1084,7 @@ vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vmacc_vx_i32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m( @@ -1093,7 +1093,7 @@ vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, 
int32_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vmacc_vv_i32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m( @@ -1102,7 +1102,7 @@ vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vmacc_vx_i32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m( @@ -1111,7 +1111,7 @@ vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vmacc_vv_i32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m( @@ -1120,7 +1120,7 @@ vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vmacc_vx_i32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m( @@ -1129,7 +1129,7 @@ vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vmacc_vv_i64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m( @@ -1138,7 +1138,7 @@ vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, 
vint64m1_t vd, vint64m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vmacc_vx_i64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m( @@ -1147,7 +1147,7 @@ vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vmacc_vv_i64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m( @@ -1156,7 +1156,7 @@ vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vmacc_vx_i64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m( @@ -1165,7 +1165,7 @@ vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vmacc_vv_i64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m( @@ -1174,7 +1174,7 @@ vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vmacc_vx_i64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m( @@ -1183,7 +1183,7 @@ vint64m4_t 
test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vmacc_vv_i64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m( @@ -1192,7 +1192,7 @@ vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vmacc_vx_i64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m( @@ -1201,7 +1201,7 @@ vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vmacc_vv_u8mf8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m( @@ -1210,7 +1210,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vmacc_vx_u8mf8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m( @@ -1219,7 +1219,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vmacc_vv_u8mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m( @@ 
-1228,7 +1228,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vmacc_vx_u8mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m( @@ -1237,7 +1237,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vmacc_vv_u8mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m( @@ -1246,7 +1246,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vmacc_vx_u8mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m( @@ -1255,7 +1255,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vmacc_vv_u8m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m( @@ -1264,7 +1264,7 @@ vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vmacc_vx_u8m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vmacc_vv_u8m2_m( @@ -1273,7 +1273,7 @@ vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vmacc_vv_u8m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m( @@ -1282,7 +1282,7 @@ vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vmacc_vx_u8m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m( @@ -1291,7 +1291,7 @@ vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vmacc_vv_u8m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m( @@ -1300,7 +1300,7 @@ vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vmacc_vx_u8m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m( @@ -1309,7 +1309,7 @@ vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vmacc_vv_u8m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vmacc_vx_u8m8_m( @@ -1318,7 +1318,7 @@ vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vmacc_vx_u8m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m( @@ -1327,7 +1327,7 @@ vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vmacc_vv_u16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m( @@ -1336,7 +1336,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vmacc_vx_u16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m( @@ -1345,7 +1345,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vmacc_vv_u16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m( @@ -1354,7 +1354,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vmacc_vx_u16mf2_m(mask, vd, rs1, vs2, vl); + return 
__riscv_vmacc_vx_u16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m( @@ -1363,7 +1363,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vmacc_vv_u16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m( @@ -1372,7 +1372,7 @@ vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vmacc_vx_u16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m( @@ -1381,7 +1381,7 @@ vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vmacc_vv_u16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m( @@ -1390,7 +1390,7 @@ vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vmacc_vx_u16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m( @@ -1399,7 +1399,7 @@ vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return 
vmacc_vv_u16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m( @@ -1408,7 +1408,7 @@ vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vmacc_vx_u16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m( @@ -1417,7 +1417,7 @@ vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vmacc_vv_u16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m( @@ -1426,7 +1426,7 @@ vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vmacc_vx_u16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m( @@ -1435,7 +1435,7 @@ vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vmacc_vv_u32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m( @@ -1444,7 +1444,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, 
uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vmacc_vx_u32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m( @@ -1453,7 +1453,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vmacc_vv_u32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m( @@ -1462,7 +1462,7 @@ vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vmacc_vx_u32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m( @@ -1471,7 +1471,7 @@ vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vmacc_vv_u32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m( @@ -1480,7 +1480,7 @@ vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vmacc_vx_u32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m( @@ -1489,7 +1489,7 @@ vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vmacc_vv_u32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m( @@ -1498,7 +1498,7 @@ vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vmacc_vx_u32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m( @@ -1507,7 +1507,7 @@ vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vmacc_vv_u32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m( @@ -1516,7 +1516,7 @@ vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vmacc_vx_u32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m( @@ -1525,7 +1525,7 @@ vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vmacc_vv_u64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m( @@ -1534,7 +1534,7 @@ vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vmacc_vx_u64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m( @@ -1543,7 +1543,7 @@ vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vmacc_vv_u64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m( @@ -1552,7 +1552,7 @@ vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vmacc_vx_u64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m( @@ -1561,7 +1561,7 @@ vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vmacc_vv_u64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m( @@ -1570,7 +1570,7 @@ vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vmacc_vx_u64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m( @@ -1579,7 +1579,7 @@ vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, 
vuint64m4_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vmacc_vv_u64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m( @@ -1588,6 +1588,6 @@ vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vmacc_vx_u64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadc.c index 1d2876673141..8ca11d110138 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadc.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vvm_i8mf8_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i8mf8_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf8_b64( @@ -21,7 +21,7 @@ vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, vbool64_t car // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vxm_i8mf8_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i8mf8_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf8_b64( @@ -30,7 +30,7 @@ vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, vbool64_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t 
test_vmadc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmadc_vv_i8mf8_b64(op1, op2, vl); + return __riscv_vmadc_vv_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf8_b64( @@ -39,7 +39,7 @@ vbool64_t test_vmadc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return vmadc_vx_i8mf8_b64(op1, op2, vl); + return __riscv_vmadc_vx_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf4_b32( @@ -48,7 +48,7 @@ vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vvm_i8mf4_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i8mf4_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf4_b32( @@ -57,7 +57,7 @@ vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, vbool32_t car // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vxm_i8mf4_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i8mf4_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf4_b32( @@ -66,7 +66,7 @@ vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, vbool32_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmadc_vv_i8mf4_b32(op1, op2, vl); + return __riscv_vmadc_vv_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf4_b32( @@ -75,7 +75,7 @@ vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return vmadc_vx_i8mf4_b32(op1, op2, vl); + return __riscv_vmadc_vx_i8mf4_b32(op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf2_b16( @@ -84,7 +84,7 @@ vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vvm_i8mf2_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i8mf2_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf2_b16( @@ -93,7 +93,7 @@ vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, vbool16_t car // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vxm_i8mf2_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i8mf2_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf2_b16( @@ -102,7 +102,7 @@ vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, vbool16_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmadc_vv_i8mf2_b16(op1, op2, vl); + return __riscv_vmadc_vv_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf2_b16( @@ -111,7 +111,7 @@ vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return vmadc_vx_i8mf2_b16(op1, op2, vl); + return __riscv_vmadc_vx_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m1_b8( @@ -120,7 +120,7 @@ vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vvm_i8m1_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i8m1_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m1_b8( @@ -129,7 +129,7 @@ vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t 
op1, vint8m1_t op2, vbool8_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vxm_i8m1_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i8m1_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i8m1_b8( @@ -138,7 +138,7 @@ vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t carryin, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmadc_vv_i8m1_b8(op1, op2, vl); + return __riscv_vmadc_vv_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i8m1_b8( @@ -147,7 +147,7 @@ vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return vmadc_vx_i8m1_b8(op1, op2, vl); + return __riscv_vmadc_vx_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m2_b4( @@ -156,7 +156,7 @@ vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, size_t vl) { - return vmadc_vvm_i8m2_b4(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i8m2_b4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m2_b4( @@ -165,7 +165,7 @@ vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl) { - return vmadc_vxm_i8m2_b4(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i8m2_b4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i8m2_b4( @@ -174,7 +174,7 @@ vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t carryin, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return 
vmadc_vv_i8m2_b4(op1, op2, vl); + return __riscv_vmadc_vv_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i8m2_b4( @@ -183,7 +183,7 @@ vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return vmadc_vx_i8m2_b4(op1, op2, vl); + return __riscv_vmadc_vx_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m4_b2( @@ -192,7 +192,7 @@ vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl) { - return vmadc_vvm_i8m4_b2(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i8m4_b2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m4_b2( @@ -201,7 +201,7 @@ vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl) { - return vmadc_vxm_i8m4_b2(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i8m4_b2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i8m4_b2( @@ -210,7 +210,7 @@ vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t carryin, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmadc_vv_i8m4_b2(op1, op2, vl); + return __riscv_vmadc_vv_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i8m4_b2( @@ -219,7 +219,7 @@ vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return vmadc_vx_i8m4_b2(op1, op2, vl); + return __riscv_vmadc_vx_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m8_b1( @@ -228,7 +228,7 @@ vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t 
vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl) { - return vmadc_vvm_i8m8_b1(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i8m8_b1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m8_b1( @@ -237,7 +237,7 @@ vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl) { - return vmadc_vxm_i8m8_b1(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i8m8_b1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i8m8_b1( @@ -246,7 +246,7 @@ vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t carryin, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmadc_vv_i8m8_b1(op1, op2, vl); + return __riscv_vmadc_vv_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i8m8_b1( @@ -255,7 +255,7 @@ vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return vmadc_vx_i8m8_b1(op1, op2, vl); + return __riscv_vmadc_vx_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf4_b64( @@ -264,7 +264,7 @@ vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vvm_i16mf4_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i16mf4_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf4_b64( @@ -273,7 +273,7 @@ vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, vbool64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl) { - 
return vmadc_vxm_i16mf4_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i16mf4_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i16mf4_b64( @@ -282,7 +282,7 @@ vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, vbool64_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmadc_vv_i16mf4_b64(op1, op2, vl); + return __riscv_vmadc_vv_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i16mf4_b64( @@ -291,7 +291,7 @@ vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return vmadc_vx_i16mf4_b64(op1, op2, vl); + return __riscv_vmadc_vx_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf2_b32( @@ -300,7 +300,7 @@ vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vvm_i16mf2_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i16mf2_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf2_b32( @@ -309,7 +309,7 @@ vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, vbool32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vxm_i16mf2_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i16mf2_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i16mf2_b32( @@ -318,7 +318,7 @@ vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, vbool32_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmadc_vv_i16mf2_b32(op1, op2, vl); + return __riscv_vmadc_vv_i16mf2_b32(op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i16mf2_b32( @@ -327,7 +327,7 @@ vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return vmadc_vx_i16mf2_b32(op1, op2, vl); + return __riscv_vmadc_vx_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m1_b16( @@ -336,7 +336,7 @@ vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vvm_i16m1_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i16m1_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m1_b16( @@ -345,7 +345,7 @@ vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, vbool16_t car // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vxm_i16m1_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i16m1_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i16m1_b16( @@ -354,7 +354,7 @@ vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, vbool16_t carryi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmadc_vv_i16m1_b16(op1, op2, vl); + return __riscv_vmadc_vv_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i16m1_b16( @@ -363,7 +363,7 @@ vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return vmadc_vx_i16m1_b16(op1, op2, vl); + return __riscv_vmadc_vx_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m2_b8( @@ -372,7 +372,7 @@ vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t op1, 
int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vvm_i16m2_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i16m2_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m2_b8( @@ -381,7 +381,7 @@ vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, vbool8_t carryi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vxm_i16m2_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i16m2_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i16m2_b8( @@ -390,7 +390,7 @@ vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmadc_vv_i16m2_b8(op1, op2, vl); + return __riscv_vmadc_vv_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i16m2_b8( @@ -399,7 +399,7 @@ vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return vmadc_vx_i16m2_b8(op1, op2, vl); + return __riscv_vmadc_vx_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m4_b4( @@ -408,7 +408,7 @@ vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl) { - return vmadc_vvm_i16m4_b4(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i16m4_b4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m4_b4( @@ -417,7 +417,7 @@ vbool4_t test_vmadc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, 
vbool4_t carryin, size_t vl) { - return vmadc_vxm_i16m4_b4(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i16m4_b4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i16m4_b4( @@ -426,7 +426,7 @@ vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmadc_vv_i16m4_b4(op1, op2, vl); + return __riscv_vmadc_vv_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i16m4_b4( @@ -435,7 +435,7 @@ vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return vmadc_vx_i16m4_b4(op1, op2, vl); + return __riscv_vmadc_vx_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m8_b2( @@ -444,7 +444,7 @@ vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl) { - return vmadc_vvm_i16m8_b2(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i16m8_b2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m8_b2( @@ -453,7 +453,7 @@ vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, vbool2_t carryi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl) { - return vmadc_vxm_i16m8_b2(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i16m8_b2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i16m8_b2( @@ -462,7 +462,7 @@ vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmadc_vv_i16m8_b2(op1, op2, vl); + return __riscv_vmadc_vv_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmadc_vx_i16m8_b2( @@ -471,7 +471,7 @@ vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return vmadc_vx_i16m8_b2(op1, op2, vl); + return __riscv_vmadc_vx_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i32mf2_b64( @@ -480,7 +480,7 @@ vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vvm_i32mf2_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i32mf2_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i32mf2_b64( @@ -489,7 +489,7 @@ vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, vbool64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vxm_i32mf2_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i32mf2_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i32mf2_b64( @@ -498,7 +498,7 @@ vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, vbool64_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmadc_vv_i32mf2_b64(op1, op2, vl); + return __riscv_vmadc_vv_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i32mf2_b64( @@ -507,7 +507,7 @@ vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return vmadc_vx_i32mf2_b64(op1, op2, vl); + return __riscv_vmadc_vx_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m1_b32( @@ -516,7 +516,7 @@ vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vvm_i32m1_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i32m1_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m1_b32( @@ -525,7 +525,7 @@ vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, vbool32_t car // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vxm_i32m1_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i32m1_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i32m1_b32( @@ -534,7 +534,7 @@ vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, vbool32_t carryi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmadc_vv_i32m1_b32(op1, op2, vl); + return __riscv_vmadc_vv_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i32m1_b32( @@ -543,7 +543,7 @@ vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return vmadc_vx_i32m1_b32(op1, op2, vl); + return __riscv_vmadc_vx_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m2_b16( @@ -552,7 +552,7 @@ vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vvm_i32m2_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i32m2_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m2_b16( @@ -561,7 +561,7 @@ vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, vbool16_t car // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t op1, int32_t 
op2, vbool16_t carryin, size_t vl) { - return vmadc_vxm_i32m2_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i32m2_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i32m2_b16( @@ -570,7 +570,7 @@ vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, vbool16_t carryi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmadc_vv_i32m2_b16(op1, op2, vl); + return __riscv_vmadc_vv_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i32m2_b16( @@ -579,7 +579,7 @@ vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return vmadc_vx_i32m2_b16(op1, op2, vl); + return __riscv_vmadc_vx_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m4_b8( @@ -588,7 +588,7 @@ vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vvm_i32m4_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i32m4_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m4_b8( @@ -597,7 +597,7 @@ vbool8_t test_vmadc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, vbool8_t carryi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vxm_i32m4_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i32m4_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i32m4_b8( @@ -606,7 +606,7 @@ vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmadc_vv_i32m4_b8(op1, op2, vl); + return __riscv_vmadc_vv_i32m4_b8(op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vmadc_vx_i32m4_b8( @@ -615,7 +615,7 @@ vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return vmadc_vx_i32m4_b8(op1, op2, vl); + return __riscv_vmadc_vx_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m8_b4( @@ -624,7 +624,7 @@ vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl) { - return vmadc_vvm_i32m8_b4(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i32m8_b4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m8_b4( @@ -633,7 +633,7 @@ vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, vbool4_t carryi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl) { - return vmadc_vxm_i32m8_b4(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i32m8_b4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i32m8_b4( @@ -642,7 +642,7 @@ vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmadc_vv_i32m8_b4(op1, op2, vl); + return __riscv_vmadc_vv_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i32m8_b4( @@ -651,7 +651,7 @@ vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return vmadc_vx_i32m8_b4(op1, op2, vl); + return __riscv_vmadc_vx_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m1_b64( @@ -660,7 +660,7 @@ vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] 
// vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vvm_i64m1_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i64m1_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m1_b64( @@ -669,7 +669,7 @@ vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, vbool64_t car // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vxm_i64m1_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i64m1_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i64m1_b64( @@ -678,7 +678,7 @@ vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, vbool64_t carryi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmadc_vv_i64m1_b64(op1, op2, vl); + return __riscv_vmadc_vv_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i64m1_b64( @@ -687,7 +687,7 @@ vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return vmadc_vx_i64m1_b64(op1, op2, vl); + return __riscv_vmadc_vx_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m2_b32( @@ -696,7 +696,7 @@ vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vvm_i64m2_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i64m2_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m2_b32( @@ -705,7 +705,7 @@ vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, vbool32_t car // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t 
vl) { - return vmadc_vxm_i64m2_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i64m2_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i64m2_b32( @@ -714,7 +714,7 @@ vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, vbool32_t carryi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmadc_vv_i64m2_b32(op1, op2, vl); + return __riscv_vmadc_vv_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i64m2_b32( @@ -723,7 +723,7 @@ vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return vmadc_vx_i64m2_b32(op1, op2, vl); + return __riscv_vmadc_vx_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m4_b16( @@ -732,7 +732,7 @@ vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vvm_i64m4_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i64m4_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m4_b16( @@ -741,7 +741,7 @@ vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, vbool16_t car // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vxm_i64m4_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i64m4_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i64m4_b16( @@ -750,7 +750,7 @@ vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, vbool16_t carryi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmadc_vv_i64m4_b16(op1, op2, vl); + return __riscv_vmadc_vv_i64m4_b16(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmadc_vx_i64m4_b16( @@ -759,7 +759,7 @@ vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return vmadc_vx_i64m4_b16(op1, op2, vl); + return __riscv_vmadc_vx_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m8_b8( @@ -768,7 +768,7 @@ vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vvm_i64m8_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_i64m8_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m8_b8( @@ -777,7 +777,7 @@ vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vxm_i64m8_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_i64m8_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_i64m8_b8( @@ -786,7 +786,7 @@ vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t carryin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmadc_vv_i64m8_b8(op1, op2, vl); + return __riscv_vmadc_vv_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_i64m8_b8( @@ -795,7 +795,7 @@ vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return vmadc_vx_i64m8_b8(op1, op2, vl); + return __riscv_vmadc_vx_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf8_b64( @@ -804,7 +804,7 @@ vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vvm_u8mf8_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u8mf8_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf8_b64( @@ -813,7 +813,7 @@ vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t c // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vxm_u8mf8_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u8mf8_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf8_b64( @@ -822,7 +822,7 @@ vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, vbool64_t carry // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmadc_vv_u8mf8_b64(op1, op2, vl); + return __riscv_vmadc_vv_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf8_b64( @@ -831,7 +831,7 @@ vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmadc_vx_u8mf8_b64(op1, op2, vl); + return __riscv_vmadc_vx_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf4_b32( @@ -840,7 +840,7 @@ vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vvm_u8mf4_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u8mf4_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf4_b32( @@ -849,7 +849,7 @@ vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t c // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, 
vbool32_t carryin, size_t vl) { - return vmadc_vxm_u8mf4_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u8mf4_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf4_b32( @@ -858,7 +858,7 @@ vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, vbool32_t carry // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmadc_vv_u8mf4_b32(op1, op2, vl); + return __riscv_vmadc_vv_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf4_b32( @@ -867,7 +867,7 @@ vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmadc_vx_u8mf4_b32(op1, op2, vl); + return __riscv_vmadc_vx_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf2_b16( @@ -876,7 +876,7 @@ vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vvm_u8mf2_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u8mf2_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf2_b16( @@ -885,7 +885,7 @@ vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t c // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vxm_u8mf2_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u8mf2_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf2_b16( @@ -894,7 +894,7 @@ vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, vbool16_t carry // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmadc_vv_u8mf2_b16(op1, op2, vl); + return 
__riscv_vmadc_vv_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf2_b16( @@ -903,7 +903,7 @@ vbool16_t test_vmadc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmadc_vx_u8mf2_b16(op1, op2, vl); + return __riscv_vmadc_vx_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m1_b8( @@ -912,7 +912,7 @@ vbool16_t test_vmadc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vvm_u8m1_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u8m1_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m1_b8( @@ -921,7 +921,7 @@ vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vxm_u8m1_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u8m1_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u8m1_b8( @@ -930,7 +930,7 @@ vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmadc_vv_u8m1_b8(op1, op2, vl); + return __riscv_vmadc_vv_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u8m1_b8( @@ -939,7 +939,7 @@ vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmadc_vx_u8m1_b8(op1, op2, vl); + return __riscv_vmadc_vx_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m2_b4( @@ -948,7 +948,7 @@ vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t 
vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl) { - return vmadc_vvm_u8m2_b4(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u8m2_b4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m2_b4( @@ -957,7 +957,7 @@ vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl) { - return vmadc_vxm_u8m2_b4(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u8m2_b4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u8m2_b4( @@ -966,7 +966,7 @@ vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmadc_vv_u8m2_b4(op1, op2, vl); + return __riscv_vmadc_vv_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u8m2_b4( @@ -975,7 +975,7 @@ vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmadc_vx_u8m2_b4(op1, op2, vl); + return __riscv_vmadc_vx_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m4_b2( @@ -984,7 +984,7 @@ vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl) { - return vmadc_vvm_u8m4_b2(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u8m4_b2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m4_b2( @@ -993,7 +993,7 @@ vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl) { - return 
vmadc_vxm_u8m4_b2(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u8m4_b2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u8m4_b2( @@ -1002,7 +1002,7 @@ vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmadc_vv_u8m4_b2(op1, op2, vl); + return __riscv_vmadc_vv_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u8m4_b2( @@ -1011,7 +1011,7 @@ vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmadc_vx_u8m4_b2(op1, op2, vl); + return __riscv_vmadc_vx_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m8_b1( @@ -1020,7 +1020,7 @@ vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl) { - return vmadc_vvm_u8m8_b1(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u8m8_b1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m8_b1( @@ -1029,7 +1029,7 @@ vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl) { - return vmadc_vxm_u8m8_b1(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u8m8_b1(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u8m8_b1( @@ -1038,7 +1038,7 @@ vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmadc_vv_u8m8_b1(op1, op2, vl); + return __riscv_vmadc_vv_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u8m8_b1( @@ -1047,7 +1047,7 @@ vbool1_t 
test_vmadc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmadc_vx_u8m8_b1(op1, op2, vl); + return __riscv_vmadc_vx_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf4_b64( @@ -1056,7 +1056,7 @@ vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vvm_u16mf4_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u16mf4_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf4_b64( @@ -1065,7 +1065,7 @@ vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vxm_u16mf4_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u16mf4_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u16mf4_b64( @@ -1074,7 +1074,7 @@ vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, vbool64_t ca // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmadc_vv_u16mf4_b64(op1, op2, vl); + return __riscv_vmadc_vv_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u16mf4_b64( @@ -1083,7 +1083,7 @@ vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmadc_vx_u16mf4_b64(op1, op2, vl); + return __riscv_vmadc_vx_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf2_b32( @@ -1092,7 +1092,7 @@ vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vvm_u16mf2_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u16mf2_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf2_b32( @@ -1101,7 +1101,7 @@ vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vxm_u16mf2_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u16mf2_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u16mf2_b32( @@ -1110,7 +1110,7 @@ vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, vbool32_t ca // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmadc_vv_u16mf2_b32(op1, op2, vl); + return __riscv_vmadc_vv_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u16mf2_b32( @@ -1119,7 +1119,7 @@ vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmadc_vx_u16mf2_b32(op1, op2, vl); + return __riscv_vmadc_vx_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m1_b16( @@ -1128,7 +1128,7 @@ vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vvm_u16m1_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u16m1_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m1_b16( @@ -1137,7 +1137,7 @@ vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, vbool16_t c // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t 
op1, uint16_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vxm_u16m1_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u16m1_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u16m1_b16( @@ -1146,7 +1146,7 @@ vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, vbool16_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmadc_vv_u16m1_b16(op1, op2, vl); + return __riscv_vmadc_vv_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u16m1_b16( @@ -1155,7 +1155,7 @@ vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmadc_vx_u16m1_b16(op1, op2, vl); + return __riscv_vmadc_vx_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m2_b8( @@ -1164,7 +1164,7 @@ vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vvm_u16m2_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u16m2_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m2_b8( @@ -1173,7 +1173,7 @@ vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, vbool8_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vxm_u16m2_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u16m2_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u16m2_b8( @@ -1182,7 +1182,7 @@ vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, vbool8_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmadc_vv_u16m2_b8(op1, op2, vl); + return 
__riscv_vmadc_vv_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u16m2_b8( @@ -1191,7 +1191,7 @@ vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmadc_vx_u16m2_b8(op1, op2, vl); + return __riscv_vmadc_vx_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m4_b4( @@ -1200,7 +1200,7 @@ vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl) { - return vmadc_vvm_u16m4_b4(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u16m4_b4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m4_b4( @@ -1209,7 +1209,7 @@ vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl) { - return vmadc_vxm_u16m4_b4(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u16m4_b4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u16m4_b4( @@ -1218,7 +1218,7 @@ vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmadc_vv_u16m4_b4(op1, op2, vl); + return __riscv_vmadc_vv_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u16m4_b4( @@ -1227,7 +1227,7 @@ vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmadc_vx_u16m4_b4(op1, op2, vl); + return __riscv_vmadc_vx_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m8_b2( @@ -1236,7 +1236,7 @@ vbool4_t 
test_vmadc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl) { - return vmadc_vvm_u16m8_b2(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u16m8_b2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m8_b2( @@ -1245,7 +1245,7 @@ vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, vbool2_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl) { - return vmadc_vxm_u16m8_b2(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u16m8_b2(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u16m8_b2( @@ -1254,7 +1254,7 @@ vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, vbool2_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmadc_vv_u16m8_b2(op1, op2, vl); + return __riscv_vmadc_vv_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u16m8_b2( @@ -1263,7 +1263,7 @@ vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmadc_vx_u16m8_b2(op1, op2, vl); + return __riscv_vmadc_vx_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u32mf2_b64( @@ -1272,7 +1272,7 @@ vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vvm_u32mf2_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u32mf2_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u32mf2_b64( @@ -1281,7 +1281,7 @@ vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_ // CHECK-RV64-NEXT: 
ret [[TMP0]] // vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vxm_u32mf2_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u32mf2_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u32mf2_b64( @@ -1290,7 +1290,7 @@ vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, vbool64_t ca // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmadc_vv_u32mf2_b64(op1, op2, vl); + return __riscv_vmadc_vv_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u32mf2_b64( @@ -1299,7 +1299,7 @@ vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmadc_vx_u32mf2_b64(op1, op2, vl); + return __riscv_vmadc_vx_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m1_b32( @@ -1308,7 +1308,7 @@ vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vvm_u32m1_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u32m1_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m1_b32( @@ -1317,7 +1317,7 @@ vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, vbool32_t c // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vxm_u32m1_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u32m1_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u32m1_b32( @@ -1326,7 +1326,7 @@ vbool32_t test_vmadc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, vbool32_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t 
test_vmadc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmadc_vv_u32m1_b32(op1, op2, vl); + return __riscv_vmadc_vv_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u32m1_b32( @@ -1335,7 +1335,7 @@ vbool32_t test_vmadc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmadc_vx_u32m1_b32(op1, op2, vl); + return __riscv_vmadc_vx_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m2_b16( @@ -1344,7 +1344,7 @@ vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vvm_u32m2_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u32m2_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m2_b16( @@ -1353,7 +1353,7 @@ vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, vbool16_t c // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vxm_u32m2_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u32m2_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u32m2_b16( @@ -1362,7 +1362,7 @@ vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, vbool16_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmadc_vv_u32m2_b16(op1, op2, vl); + return __riscv_vmadc_vv_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u32m2_b16( @@ -1371,7 +1371,7 @@ vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmadc_vx_u32m2_b16(op1, op2, vl); 
+ return __riscv_vmadc_vx_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m4_b8( @@ -1380,7 +1380,7 @@ vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vvm_u32m4_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u32m4_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m4_b8( @@ -1389,7 +1389,7 @@ vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, vbool8_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vxm_u32m4_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u32m4_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u32m4_b8( @@ -1398,7 +1398,7 @@ vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, vbool8_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmadc_vv_u32m4_b8(op1, op2, vl); + return __riscv_vmadc_vv_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u32m4_b8( @@ -1407,7 +1407,7 @@ vbool8_t test_vmadc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmadc_vx_u32m4_b8(op1, op2, vl); + return __riscv_vmadc_vx_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m8_b4( @@ -1416,7 +1416,7 @@ vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl) { - return vmadc_vvm_u32m8_b4(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u32m8_b4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m8_b4( @@ 
-1425,7 +1425,7 @@ vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, vbool4_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl) { - return vmadc_vxm_u32m8_b4(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u32m8_b4(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u32m8_b4( @@ -1434,7 +1434,7 @@ vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, vbool4_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmadc_vv_u32m8_b4(op1, op2, vl); + return __riscv_vmadc_vv_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u32m8_b4( @@ -1443,7 +1443,7 @@ vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmadc_vx_u32m8_b4(op1, op2, vl); + return __riscv_vmadc_vx_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m1_b64( @@ -1452,7 +1452,7 @@ vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vvm_u64m1_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u64m1_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m1_b64( @@ -1461,7 +1461,7 @@ vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, vbool64_t c // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl) { - return vmadc_vxm_u64m1_b64(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u64m1_b64(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u64m1_b64( @@ -1470,7 +1470,7 @@ vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, 
vbool64_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmadc_vv_u64m1_b64(op1, op2, vl); + return __riscv_vmadc_vv_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u64m1_b64( @@ -1479,7 +1479,7 @@ vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmadc_vx_u64m1_b64(op1, op2, vl); + return __riscv_vmadc_vx_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m2_b32( @@ -1488,7 +1488,7 @@ vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vvm_u64m2_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u64m2_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m2_b32( @@ -1497,7 +1497,7 @@ vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, vbool32_t c // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl) { - return vmadc_vxm_u64m2_b32(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u64m2_b32(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u64m2_b32( @@ -1506,7 +1506,7 @@ vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, vbool32_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmadc_vv_u64m2_b32(op1, op2, vl); + return __riscv_vmadc_vv_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u64m2_b32( @@ -1515,7 +1515,7 @@ vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t op1, uint64_t 
op2, size_t vl) { - return vmadc_vx_u64m2_b32(op1, op2, vl); + return __riscv_vmadc_vx_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m4_b16( @@ -1524,7 +1524,7 @@ vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vvm_u64m4_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vvm_u64m4_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m4_b16( @@ -1533,7 +1533,7 @@ vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, vbool16_t c // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl) { - return vmadc_vxm_u64m4_b16(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u64m4_b16(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u64m4_b16( @@ -1542,7 +1542,7 @@ vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, vbool16_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmadc_vv_u64m4_b16(op1, op2, vl); + return __riscv_vmadc_vv_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u64m4_b16( @@ -1551,7 +1551,7 @@ vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmadc_vx_u64m4_b16(op1, op2, vl); + return __riscv_vmadc_vx_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m8_b8( @@ -1560,7 +1560,7 @@ vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vvm_u64m8_b8(op1, op2, carryin, vl); + return 
__riscv_vmadc_vvm_u64m8_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m8_b8( @@ -1569,7 +1569,7 @@ vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t carr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl) { - return vmadc_vxm_u64m8_b8(op1, op2, carryin, vl); + return __riscv_vmadc_vxm_u64m8_b8(op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vmadc_vv_u64m8_b8( @@ -1578,7 +1578,7 @@ vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmadc_vv_u64m8_b8(op1, op2, vl); + return __riscv_vmadc_vv_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmadc_vx_u64m8_b8( @@ -1587,6 +1587,6 @@ vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmadc_vx_u64m8_b8(op1, op2, vl); + return __riscv_vmadc_vx_u64m8_b8(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadd.c index bea91a4493bd..3361cd52275b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmadd.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vmadd_vv_i8mf8(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8( @@ -22,7 +22,7 @@ vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vmadd_vx_i8mf8(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4( @@ -31,7 +31,7 @@ vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vmadd_vv_i8mf4(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4( @@ -40,7 +40,7 @@ vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vmadd_vx_i8mf4(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2( @@ -49,7 +49,7 @@ vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vmadd_vv_i8mf2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2( @@ -58,7 +58,7 @@ vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vmadd_vx_i8mf2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1( @@ -67,7 +67,7 @@ vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vmadd_vv_i8m1(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m1(vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vmadd_vx_i8m1( @@ -76,7 +76,7 @@ vint8m1_t test_vmadd_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vmadd_vx_i8m1(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2( @@ -85,7 +85,7 @@ vint8m1_t test_vmadd_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vmadd_vv_i8m2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2( @@ -94,7 +94,7 @@ vint8m2_t test_vmadd_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vmadd_vx_i8m2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4( @@ -103,7 +103,7 @@ vint8m2_t test_vmadd_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vmadd_vv_i8m4(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4( @@ -112,7 +112,7 @@ vint8m4_t test_vmadd_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vmadd_vx_i8m4(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8( @@ -121,7 +121,7 @@ vint8m4_t test_vmadd_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8(vint8m8_t vd, 
vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vmadd_vv_i8m8(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8( @@ -130,7 +130,7 @@ vint8m8_t test_vmadd_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vmadd_vx_i8m8(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4( @@ -139,7 +139,7 @@ vint8m8_t test_vmadd_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vmadd_vv_i16mf4(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4( @@ -148,7 +148,7 @@ vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vmadd_vx_i16mf4(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2( @@ -157,7 +157,7 @@ vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vmadd_vv_i16mf2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2( @@ -166,7 +166,7 @@ vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vmadd_vx_i16mf2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16mf2(vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vmadd_vv_i16m1( @@ -175,7 +175,7 @@ vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vmadd_vv_i16m1(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1( @@ -184,7 +184,7 @@ vint16m1_t test_vmadd_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vmadd_vx_i16m1(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2( @@ -193,7 +193,7 @@ vint16m1_t test_vmadd_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vmadd_vv_i16m2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2( @@ -202,7 +202,7 @@ vint16m2_t test_vmadd_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vmadd_vx_i16m2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4( @@ -211,7 +211,7 @@ vint16m2_t test_vmadd_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vmadd_vv_i16m4(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4( @@ -220,7 +220,7 @@ vint16m4_t test_vmadd_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, si // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m4_t test_vmadd_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vmadd_vx_i16m4(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8( @@ -229,7 +229,7 @@ vint16m4_t test_vmadd_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vmadd_vv_i16m8(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8( @@ -238,7 +238,7 @@ vint16m8_t test_vmadd_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vmadd_vx_i16m8(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2( @@ -247,7 +247,7 @@ vint16m8_t test_vmadd_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vmadd_vv_i32mf2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2( @@ -256,7 +256,7 @@ vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vmadd_vx_i32mf2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1( @@ -265,7 +265,7 @@ vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vmadd_vv_i32m1(vd, vs1, vs2, vl); + return 
__riscv_vmadd_vv_i32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1( @@ -274,7 +274,7 @@ vint32m1_t test_vmadd_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vmadd_vx_i32m1(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2( @@ -283,7 +283,7 @@ vint32m1_t test_vmadd_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vmadd_vv_i32m2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2( @@ -292,7 +292,7 @@ vint32m2_t test_vmadd_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vmadd_vx_i32m2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4( @@ -301,7 +301,7 @@ vint32m2_t test_vmadd_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vmadd_vv_i32m4(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4( @@ -310,7 +310,7 @@ vint32m4_t test_vmadd_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vmadd_vx_i32m4(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8( @@ -319,7 +319,7 @@ vint32m4_t test_vmadd_vx_i32m4(vint32m4_t vd, int32_t rs1, 
vint32m4_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vmadd_vv_i32m8(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8( @@ -328,7 +328,7 @@ vint32m8_t test_vmadd_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vmadd_vx_i32m8(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1( @@ -337,7 +337,7 @@ vint32m8_t test_vmadd_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vmadd_vv_i64m1(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1( @@ -346,7 +346,7 @@ vint64m1_t test_vmadd_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vmadd_vx_i64m1(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2( @@ -355,7 +355,7 @@ vint64m1_t test_vmadd_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vmadd_vv_i64m2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2( @@ -364,7 +364,7 @@ vint64m2_t test_vmadd_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return 
vmadd_vx_i64m2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4( @@ -373,7 +373,7 @@ vint64m2_t test_vmadd_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vmadd_vv_i64m4(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4( @@ -382,7 +382,7 @@ vint64m4_t test_vmadd_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vmadd_vx_i64m4(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8( @@ -391,7 +391,7 @@ vint64m4_t test_vmadd_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vmadd_vv_i64m8(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8( @@ -400,7 +400,7 @@ vint64m8_t test_vmadd_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vmadd_vx_i64m8(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8( @@ -409,7 +409,7 @@ vint64m8_t test_vmadd_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vmadd_vv_u8mf8(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8( @@ -418,7 +418,7 @@ vuint8mf8_t 
test_vmadd_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vmadd_vx_u8mf8(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4( @@ -427,7 +427,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vmadd_vv_u8mf4(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4( @@ -436,7 +436,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vmadd_vx_u8mf4(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2( @@ -445,7 +445,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vmadd_vv_u8mf2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2( @@ -454,7 +454,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vmadd_vx_u8mf2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1( @@ -463,7 +463,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t vd, 
vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vmadd_vv_u8m1(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1( @@ -472,7 +472,7 @@ vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vmadd_vx_u8m1(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2( @@ -481,7 +481,7 @@ vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vmadd_vv_u8m2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2( @@ -490,7 +490,7 @@ vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vmadd_vx_u8m2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4( @@ -499,7 +499,7 @@ vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vmadd_vv_u8m4(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4( @@ -508,7 +508,7 @@ vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vmadd_vx_u8m4(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8( @@ -517,7 
+517,7 @@ vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vmadd_vv_u8m8(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8( @@ -526,7 +526,7 @@ vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vmadd_vx_u8m8(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4( @@ -535,7 +535,7 @@ vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vmadd_vv_u16mf4(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4( @@ -544,7 +544,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vmadd_vx_u16mf4(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2( @@ -553,7 +553,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vmadd_vv_u16mf2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2( @@ -562,7 +562,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vmadd_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vmadd_vx_u16mf2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1( @@ -571,7 +571,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vmadd_vv_u16m1(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1( @@ -580,7 +580,7 @@ vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vmadd_vx_u16m1(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2( @@ -589,7 +589,7 @@ vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vmadd_vv_u16m2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2( @@ -598,7 +598,7 @@ vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vmadd_vx_u16m2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4( @@ -607,7 +607,7 @@ vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vmadd_vv_u16m4(vd, vs1, vs2, vl); + return 
__riscv_vmadd_vv_u16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4( @@ -616,7 +616,7 @@ vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vmadd_vx_u16m4(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8( @@ -625,7 +625,7 @@ vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vmadd_vv_u16m8(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8( @@ -634,7 +634,7 @@ vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vmadd_vx_u16m8(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2( @@ -643,7 +643,7 @@ vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vmadd_vv_u32mf2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2( @@ -652,7 +652,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vmadd_vx_u32mf2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1( @@ -661,7 +661,7 @@ vuint32mf2_t 
test_vmadd_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vmadd_vv_u32m1(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1( @@ -670,7 +670,7 @@ vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vmadd_vx_u32m1(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2( @@ -679,7 +679,7 @@ vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vmadd_vv_u32m2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2( @@ -688,7 +688,7 @@ vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vmadd_vx_u32m2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4( @@ -697,7 +697,7 @@ vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vmadd_vv_u32m4(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4( @@ -706,7 +706,7 @@ vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t 
vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vmadd_vx_u32m4(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8( @@ -715,7 +715,7 @@ vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vmadd_vv_u32m8(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8( @@ -724,7 +724,7 @@ vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vmadd_vx_u32m8(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1( @@ -733,7 +733,7 @@ vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vmadd_vv_u64m1(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1( @@ -742,7 +742,7 @@ vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vmadd_vx_u64m1(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2( @@ -751,7 +751,7 @@ vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vmadd_vv_u64m2(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m2(vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vmadd_vx_u64m2( @@ -760,7 +760,7 @@ vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vmadd_vx_u64m2(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4( @@ -769,7 +769,7 @@ vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vmadd_vv_u64m4(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4( @@ -778,7 +778,7 @@ vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vmadd_vx_u64m4(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8( @@ -787,7 +787,7 @@ vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vmadd_vv_u64m8(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8( @@ -796,7 +796,7 @@ vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vmadd_vx_u64m8(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m( @@ -805,7 +805,7 @@ vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, s // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vmadd_vv_i8mf8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m( @@ -814,7 +814,7 @@ vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vmadd_vx_i8mf8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m( @@ -823,7 +823,7 @@ vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vmadd_vv_i8mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m( @@ -832,7 +832,7 @@ vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vmadd_vx_i8mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m( @@ -841,7 +841,7 @@ vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vmadd_vv_i8mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m( @@ -850,7 +850,7 @@ vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t 
vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vmadd_vx_i8mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m( @@ -859,7 +859,7 @@ vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vmadd_vv_i8m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m( @@ -868,7 +868,7 @@ vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vmadd_vx_i8m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m( @@ -877,7 +877,7 @@ vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vmadd_vv_i8m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m( @@ -886,7 +886,7 @@ vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vmadd_vx_i8m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m( @@ -895,7 +895,7 @@ vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_ // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vmadd_vv_i8m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m( @@ -904,7 +904,7 @@ vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vmadd_vx_i8m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m( @@ -913,7 +913,7 @@ vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vmadd_vv_i8m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m( @@ -922,7 +922,7 @@ vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vmadd_vx_i8m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m( @@ -931,7 +931,7 @@ vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vmadd_vv_i16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m( @@ -940,7 +940,7 @@ vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vmadd_vx_i16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m( @@ -949,7 +949,7 @@ vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vmadd_vv_i16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m( @@ -958,7 +958,7 @@ vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vmadd_vx_i16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m( @@ -967,7 +967,7 @@ vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vmadd_vv_i16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m( @@ -976,7 +976,7 @@ vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vmadd_vx_i16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m( @@ -985,7 +985,7 @@ vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vmadd_vv_i16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m( @@ -994,7 +994,7 @@ vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vmadd_vx_i16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m( @@ -1003,7 +1003,7 @@ vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vmadd_vv_i16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m( @@ -1012,7 +1012,7 @@ vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vmadd_vx_i16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m( @@ -1021,7 +1021,7 @@ vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vmadd_vv_i16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m( @@ -1030,7 +1030,7 @@ vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, 
vint16m8_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vmadd_vx_i16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m( @@ -1039,7 +1039,7 @@ vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vmadd_vv_i32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m( @@ -1048,7 +1048,7 @@ vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vmadd_vx_i32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m( @@ -1057,7 +1057,7 @@ vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vmadd_vv_i32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m( @@ -1066,7 +1066,7 @@ vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vmadd_vx_i32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m( @@ -1075,7 +1075,7 @@ vint32m1_t 
test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vmadd_vv_i32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m( @@ -1084,7 +1084,7 @@ vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vmadd_vx_i32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m( @@ -1093,7 +1093,7 @@ vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vmadd_vv_i32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m( @@ -1102,7 +1102,7 @@ vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vmadd_vx_i32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m( @@ -1111,7 +1111,7 @@ vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vmadd_vv_i32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m( @@ -1120,7 
+1120,7 @@ vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vmadd_vx_i32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m( @@ -1129,7 +1129,7 @@ vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vmadd_vv_i64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m( @@ -1138,7 +1138,7 @@ vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vmadd_vx_i64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m( @@ -1147,7 +1147,7 @@ vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vmadd_vv_i64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m( @@ -1156,7 +1156,7 @@ vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vmadd_vx_i64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vmadd_vv_i64m4_m( @@ -1165,7 +1165,7 @@ vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vmadd_vv_i64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m( @@ -1174,7 +1174,7 @@ vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vmadd_vx_i64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m( @@ -1183,7 +1183,7 @@ vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vmadd_vv_i64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m( @@ -1192,7 +1192,7 @@ vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vmadd_vx_i64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m( @@ -1201,7 +1201,7 @@ vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vmadd_vv_u8mf8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf8_m(mask, vd, vs1, vs2, vl); } 
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m( @@ -1210,7 +1210,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vmadd_vx_u8mf8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m( @@ -1219,7 +1219,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vmadd_vv_u8mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m( @@ -1228,7 +1228,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vmadd_vx_u8mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m( @@ -1237,7 +1237,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vmadd_vv_u8mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m( @@ -1246,7 +1246,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vmadd_vx_u8mf2_m(mask, vd, rs1, vs2, vl); + return 
__riscv_vmadd_vx_u8mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m( @@ -1255,7 +1255,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vmadd_vv_u8m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m( @@ -1264,7 +1264,7 @@ vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vmadd_vx_u8m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m( @@ -1273,7 +1273,7 @@ vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vmadd_vv_u8m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m( @@ -1282,7 +1282,7 @@ vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vmadd_vx_u8m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m( @@ -1291,7 +1291,7 @@ vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vmadd_vv_u8m4_m(mask, vd, vs1, vs2, vl); + return 
__riscv_vmadd_vv_u8m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m( @@ -1300,7 +1300,7 @@ vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vmadd_vx_u8m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m( @@ -1309,7 +1309,7 @@ vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vmadd_vv_u8m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m( @@ -1318,7 +1318,7 @@ vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vmadd_vx_u8m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m( @@ -1327,7 +1327,7 @@ vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vmadd_vv_u16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m( @@ -1336,7 +1336,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vmadd_vx_u16mf4_m(mask, vd, 
rs1, vs2, vl); + return __riscv_vmadd_vx_u16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m( @@ -1345,7 +1345,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vmadd_vv_u16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m( @@ -1354,7 +1354,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vmadd_vx_u16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m( @@ -1363,7 +1363,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vmadd_vv_u16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m( @@ -1372,7 +1372,7 @@ vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vmadd_vx_u16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m( @@ -1381,7 +1381,7 @@ vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t 
vs2, size_t vl) { - return vmadd_vv_u16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m( @@ -1390,7 +1390,7 @@ vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vmadd_vx_u16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m( @@ -1399,7 +1399,7 @@ vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vmadd_vv_u16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m( @@ -1408,7 +1408,7 @@ vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vmadd_vx_u16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m( @@ -1417,7 +1417,7 @@ vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vmadd_vv_u16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m( @@ -1426,7 +1426,7 @@ vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, 
vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vmadd_vx_u16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m( @@ -1435,7 +1435,7 @@ vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vmadd_vv_u32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m( @@ -1444,7 +1444,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vmadd_vx_u32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m( @@ -1453,7 +1453,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vmadd_vv_u32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m( @@ -1462,7 +1462,7 @@ vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vmadd_vx_u32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m( @@ -1471,7 +1471,7 @@ vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vmadd_vv_u32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m( @@ -1480,7 +1480,7 @@ vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vmadd_vx_u32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m( @@ -1489,7 +1489,7 @@ vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vmadd_vv_u32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m( @@ -1498,7 +1498,7 @@ vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vmadd_vx_u32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m( @@ -1507,7 +1507,7 @@ vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vmadd_vv_u32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m( @@ -1516,7 +1516,7 @@ vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vmadd_vx_u32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m( @@ -1525,7 +1525,7 @@ vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vmadd_vv_u64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m( @@ -1534,7 +1534,7 @@ vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vmadd_vx_u64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m( @@ -1543,7 +1543,7 @@ vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vmadd_vv_u64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m( @@ -1552,7 +1552,7 @@ vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vmadd_vx_u64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m( @@ -1561,7 +1561,7 @@ vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t 
mask, vuint64m2_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vmadd_vv_u64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m( @@ -1570,7 +1570,7 @@ vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vmadd_vx_u64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m( @@ -1579,7 +1579,7 @@ vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vmadd_vv_u64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m( @@ -1588,6 +1588,6 @@ vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vmadd_vx_u64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmand.c index 6dbb5970eb56..01d43374038a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmand.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t 
test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return vmand_mm_b1(op1, op2, vl); + return __riscv_vmand_mm_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmand_mm_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return vmand_mm_b2(op1, op2, vl); + return __riscv_vmand_mm_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmand_mm_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return vmand_mm_b4(op1, op2, vl); + return __riscv_vmand_mm_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmand_mm_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return vmand_mm_b8(op1, op2, vl); + return __riscv_vmand_mm_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmand_mm_b16( @@ -48,7 +48,7 @@ vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return vmand_mm_b16(op1, op2, vl); + return __riscv_vmand_mm_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmand_mm_b32( @@ -57,7 +57,7 @@ vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return vmand_mm_b32(op1, op2, vl); + return __riscv_vmand_mm_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmand_mm_b64( @@ -66,6 +66,6 @@ vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return vmand_mm_b64(op1, op2, vl); + return 
__riscv_vmand_mm_b64(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmandn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmandn.c index 7146f424e30e..90b316c260e4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmandn.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmandn.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmandn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return vmandn_mm_b1(op1, op2, vl); + return __riscv_vmandn_mm_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmandn_mm_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmandn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmandn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return vmandn_mm_b2(op1, op2, vl); + return __riscv_vmandn_mm_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmandn_mm_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmandn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmandn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return vmandn_mm_b4(op1, op2, vl); + return __riscv_vmandn_mm_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmandn_mm_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmandn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmandn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return vmandn_mm_b8(op1, op2, vl); + return __riscv_vmandn_mm_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmandn_mm_b16( @@ -48,7 +48,7 @@ vbool8_t test_vmandn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmandn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return vmandn_mm_b16(op1, op2, vl); + return __riscv_vmandn_mm_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmandn_mm_b32( @@ -57,7 +57,7 @@ 
vbool16_t test_vmandn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmandn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return vmandn_mm_b32(op1, op2, vl); + return __riscv_vmandn_mm_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmandn_mm_b64( @@ -66,6 +66,6 @@ vbool32_t test_vmandn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmandn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return vmandn_mm_b64(op1, op2, vl); + return __riscv_vmandn_mm_b64(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmax.c index 248daaf8dcae..457835507bfe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmax.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmax_vv_i8mf8(op1, op2, vl); + return __riscv_vmax_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf8(op1, op2, vl); + return __riscv_vmax_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmax_vv_i8mf4(op1, op2, vl); + return __riscv_vmax_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf4(op1, op2, vl); + return __riscv_vmax_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmax_vv_i8mf2(op1, op2, vl); + return __riscv_vmax_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf2(op1, op2, vl); + return __riscv_vmax_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmax_vv_i8m1(op1, op2, vl); + return __riscv_vmax_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m1(op1, op2, vl); + return __riscv_vmax_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmax_vv_i8m2(op1, op2, vl); + return __riscv_vmax_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m2(op1, op2, vl); + return __riscv_vmax_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmax_vv_i8m4(op1, op2, vl); + return __riscv_vmax_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m4(op1, op2, vl); + return __riscv_vmax_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmax_vv_i8m8(op1, op2, vl); + return __riscv_vmax_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m8(op1, op2, vl); + return __riscv_vmax_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmax_vv_i16mf4(op1, op2, vl); + return __riscv_vmax_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, 
size_t vl) { - return vmax_vx_i16mf4(op1, op2, vl); + return __riscv_vmax_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmax_vv_i16mf2(op1, op2, vl); + return __riscv_vmax_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16mf2(op1, op2, vl); + return __riscv_vmax_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmax_vv_i16m1(op1, op2, vl); + return __riscv_vmax_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m1(op1, op2, vl); + return __riscv_vmax_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmax_vv_i16m2(op1, op2, vl); + return __riscv_vmax_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, 
int16_t op2, size_t vl) { - return vmax_vx_i16m2(op1, op2, vl); + return __riscv_vmax_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmax_vv_i16m4(op1, op2, vl); + return __riscv_vmax_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m4(op1, op2, vl); + return __riscv_vmax_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmax_vv_i16m8(op1, op2, vl); + return __riscv_vmax_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m8(op1, op2, vl); + return __riscv_vmax_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmax_vv_i32mf2(op1, op2, vl); + return __riscv_vmax_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t 
op1, int32_t op2, size_t vl) { - return vmax_vx_i32mf2(op1, op2, vl); + return __riscv_vmax_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmax_vv_i32m1(op1, op2, vl); + return __riscv_vmax_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m1(op1, op2, vl); + return __riscv_vmax_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmax_vv_i32m2(op1, op2, vl); + return __riscv_vmax_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m2(op1, op2, vl); + return __riscv_vmax_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmax_vv_i32m4(op1, op2, vl); + return __riscv_vmax_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, 
int32_t op2, size_t vl) { - return vmax_vx_i32m4(op1, op2, vl); + return __riscv_vmax_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmax_vv_i32m8(op1, op2, vl); + return __riscv_vmax_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m8(op1, op2, vl); + return __riscv_vmax_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmax_vv_i64m1(op1, op2, vl); + return __riscv_vmax_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m1(op1, op2, vl); + return __riscv_vmax_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmax_vv_i64m2(op1, op2, vl); + return __riscv_vmax_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, 
size_t vl) { - return vmax_vx_i64m2(op1, op2, vl); + return __riscv_vmax_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmax_vv_i64m4(op1, op2, vl); + return __riscv_vmax_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m4(op1, op2, vl); + return __riscv_vmax_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmax_vv_i64m8(op1, op2, vl); + return __riscv_vmax_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m8(op1, op2, vl); + return __riscv_vmax_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m( @@ -408,7 +408,7 @@ vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmax_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m( @@ -426,7 +426,7 @@ vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmax_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m( @@ -435,7 +435,7 @@ vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m( @@ -444,7 +444,7 @@ vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmax_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m( @@ -453,7 +453,7 @@ vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m( @@ -462,7 +462,7 @@ vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmax_vv_i8m1_m(mask, op1, op2, vl); + return 
__riscv_vmax_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m( @@ -480,7 +480,7 @@ vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmax_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m( @@ -489,7 +489,7 @@ vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m( @@ -498,7 +498,7 @@ vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmax_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m( @@ -507,7 +507,7 @@ vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m( @@ -516,7 +516,7 @@ vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, 
int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmax_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m( @@ -525,7 +525,7 @@ vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m( @@ -534,7 +534,7 @@ vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmax_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m( @@ -543,7 +543,7 @@ vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m( @@ -552,7 +552,7 @@ vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmax_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m( @@ -561,7 +561,7 @@ vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t 
op1, int16_t op2, size_t vl) { - return vmax_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m( @@ -570,7 +570,7 @@ vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmax_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m( @@ -579,7 +579,7 @@ vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m( @@ -588,7 +588,7 @@ vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmax_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m( @@ -597,7 +597,7 @@ vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m( @@ -606,7 +606,7 @@ vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmax_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i16m4_m(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m( @@ -615,7 +615,7 @@ vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m( @@ -624,7 +624,7 @@ vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmax_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m( @@ -633,7 +633,7 @@ vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m( @@ -642,7 +642,7 @@ vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmax_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m( @@ -651,7 +651,7 @@ vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m( @@ -660,7 +660,7 @@ vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t 
mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmax_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m( @@ -669,7 +669,7 @@ vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m( @@ -678,7 +678,7 @@ vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmax_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m( @@ -687,7 +687,7 @@ vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m( @@ -696,7 +696,7 @@ vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmax_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m( @@ -705,7 +705,7 @@ vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, 
vint32m4_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m( @@ -714,7 +714,7 @@ vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmax_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m( @@ -723,7 +723,7 @@ vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m( @@ -732,7 +732,7 @@ vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmax_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m( @@ -741,7 +741,7 @@ vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m( @@ -750,7 +750,7 @@ vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmax_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i64m2_m(mask, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m( @@ -759,7 +759,7 @@ vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m( @@ -768,7 +768,7 @@ vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmax_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m( @@ -777,7 +777,7 @@ vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m( @@ -786,7 +786,7 @@ vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmax_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vmax_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m( @@ -795,6 +795,6 @@ vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vmax_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmaxu.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmaxu.c index 7eea8a6e2546..0508f79fd36c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmaxu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmaxu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmaxu_vv_u8mf8(op1, op2, vl); + return __riscv_vmaxu_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf8(op1, op2, vl); + return __riscv_vmaxu_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmaxu_vv_u8mf4(op1, op2, vl); + return __riscv_vmaxu_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4( @@ -39,7 +39,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf4(op1, op2, vl); + return __riscv_vmaxu_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2( @@ -48,7 +48,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmaxu_vv_u8mf2(op1, op2, vl); + return __riscv_vmaxu_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t 
op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf2(op1, op2, vl); + return __riscv_vmaxu_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmaxu_vv_u8m1(op1, op2, vl); + return __riscv_vmaxu_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m1(op1, op2, vl); + return __riscv_vmaxu_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2( @@ -84,7 +84,7 @@ vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmaxu_vv_u8m2(op1, op2, vl); + return __riscv_vmaxu_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m2(op1, op2, vl); + return __riscv_vmaxu_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmaxu_vv_u8m4(op1, op2, vl); + return __riscv_vmaxu_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4( @@ -111,7 +111,7 @@ vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, 
vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m4(op1, op2, vl); + return __riscv_vmaxu_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8( @@ -120,7 +120,7 @@ vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmaxu_vv_u8m8(op1, op2, vl); + return __riscv_vmaxu_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8( @@ -129,7 +129,7 @@ vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m8(op1, op2, vl); + return __riscv_vmaxu_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4( @@ -138,7 +138,7 @@ vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmaxu_vv_u16mf4(op1, op2, vl); + return __riscv_vmaxu_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4( @@ -147,7 +147,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf4(op1, op2, vl); + return __riscv_vmaxu_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2( @@ -156,7 +156,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmaxu_vv_u16mf2(op1, op2, vl); + return __riscv_vmaxu_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2( @@ -165,7 +165,7 @@ 
vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf2(op1, op2, vl); + return __riscv_vmaxu_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1( @@ -174,7 +174,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmaxu_vv_u16m1(op1, op2, vl); + return __riscv_vmaxu_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1( @@ -183,7 +183,7 @@ vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m1(op1, op2, vl); + return __riscv_vmaxu_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2( @@ -192,7 +192,7 @@ vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmaxu_vv_u16m2(op1, op2, vl); + return __riscv_vmaxu_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2( @@ -201,7 +201,7 @@ vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m2(op1, op2, vl); + return __riscv_vmaxu_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4( @@ -210,7 +210,7 @@ vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmaxu_vv_u16m4(op1, op2, vl); + return __riscv_vmaxu_vv_u16m4(op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4( @@ -219,7 +219,7 @@ vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m4(op1, op2, vl); + return __riscv_vmaxu_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8( @@ -228,7 +228,7 @@ vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmaxu_vv_u16m8(op1, op2, vl); + return __riscv_vmaxu_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8( @@ -237,7 +237,7 @@ vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m8(op1, op2, vl); + return __riscv_vmaxu_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2( @@ -246,7 +246,7 @@ vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmaxu_vv_u32mf2(op1, op2, vl); + return __riscv_vmaxu_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32mf2(op1, op2, vl); + return __riscv_vmaxu_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return 
vmaxu_vv_u32m1(op1, op2, vl); + return __riscv_vmaxu_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m1(op1, op2, vl); + return __riscv_vmaxu_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2( @@ -282,7 +282,7 @@ vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmaxu_vv_u32m2(op1, op2, vl); + return __riscv_vmaxu_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2( @@ -291,7 +291,7 @@ vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m2(op1, op2, vl); + return __riscv_vmaxu_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4( @@ -300,7 +300,7 @@ vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmaxu_vv_u32m4(op1, op2, vl); + return __riscv_vmaxu_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4( @@ -309,7 +309,7 @@ vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m4(op1, op2, vl); + return __riscv_vmaxu_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8( @@ -318,7 +318,7 @@ vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t 
test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmaxu_vv_u32m8(op1, op2, vl); + return __riscv_vmaxu_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8( @@ -327,7 +327,7 @@ vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m8(op1, op2, vl); + return __riscv_vmaxu_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1( @@ -336,7 +336,7 @@ vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmaxu_vv_u64m1(op1, op2, vl); + return __riscv_vmaxu_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1( @@ -345,7 +345,7 @@ vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m1(op1, op2, vl); + return __riscv_vmaxu_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2( @@ -354,7 +354,7 @@ vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmaxu_vv_u64m2(op1, op2, vl); + return __riscv_vmaxu_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2( @@ -363,7 +363,7 @@ vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m2(op1, op2, vl); + return __riscv_vmaxu_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4( @@ -372,7 +372,7 @@ vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t 
vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmaxu_vv_u64m4(op1, op2, vl); + return __riscv_vmaxu_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4( @@ -381,7 +381,7 @@ vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m4(op1, op2, vl); + return __riscv_vmaxu_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8( @@ -390,7 +390,7 @@ vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmaxu_vv_u64m8(op1, op2, vl); + return __riscv_vmaxu_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m8(op1, op2, vl); + return __riscv_vmaxu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmaxu_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m( @@ -417,7 +417,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf8_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m( @@ -426,7 +426,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmaxu_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m( @@ -435,7 +435,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m( @@ -444,7 +444,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmaxu_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m( @@ -453,7 +453,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m( @@ -462,7 +462,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmaxu_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m( @@ -471,7 +471,7 @@ vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t 
mask, vuint8m1_t op1, vuint8m1_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m( @@ -480,7 +480,7 @@ vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmaxu_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m( @@ -489,7 +489,7 @@ vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m( @@ -498,7 +498,7 @@ vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmaxu_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m( @@ -507,7 +507,7 @@ vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m( @@ -516,7 +516,7 @@ vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t 
op1, vuint8m8_t op2, size_t vl) { - return vmaxu_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m( @@ -525,7 +525,7 @@ vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m( @@ -534,7 +534,7 @@ vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmaxu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m( @@ -543,7 +543,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m( @@ -552,7 +552,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmaxu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m( @@ -561,7 +561,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf2_m(mask, op1, op2, 
vl); + return __riscv_vmaxu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m( @@ -570,7 +570,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmaxu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m( @@ -579,7 +579,7 @@ vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m( @@ -588,7 +588,7 @@ vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmaxu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m( @@ -597,7 +597,7 @@ vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m( @@ -606,7 +606,7 @@ vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmaxu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmaxu_vx_u16m4_m( @@ -615,7 +615,7 @@ vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m( @@ -624,7 +624,7 @@ vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmaxu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m( @@ -633,7 +633,7 @@ vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m( @@ -642,7 +642,7 @@ vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmaxu_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m( @@ -651,7 +651,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m( @@ -660,7 +660,7 @@ vuint32mf2_t 
test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmaxu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m( @@ -669,7 +669,7 @@ vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m( @@ -678,7 +678,7 @@ vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmaxu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m( @@ -687,7 +687,7 @@ vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m( @@ -696,7 +696,7 @@ vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmaxu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m( @@ -705,7 +705,7 @@ vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m( @@ -714,7 +714,7 @@ vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmaxu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m( @@ -723,7 +723,7 @@ vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m( @@ -732,7 +732,7 @@ vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmaxu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m( @@ -741,7 +741,7 @@ vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m( @@ -750,7 +750,7 @@ vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, 
size_t vl) { - return vmaxu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m( @@ -759,7 +759,7 @@ vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m( @@ -768,7 +768,7 @@ vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmaxu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m( @@ -777,7 +777,7 @@ vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vmaxu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m( @@ -786,7 +786,7 @@ vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmaxu_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vmaxu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m8_m(mask, op1, op2, vl); + return 
__riscv_vmaxu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmclr.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmclr.c index 0252c1c79eed..70a6a06c5303 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmclr.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmclr.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmclr_m_b1(size_t vl) { - return vmclr_m_b1(vl); + return __riscv_vmclr_m_b1(vl); } // CHECK-RV64-LABEL: @test_vmclr_m_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmclr_m_b1(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmclr_m_b2(size_t vl) { - return vmclr_m_b2(vl); + return __riscv_vmclr_m_b2(vl); } // CHECK-RV64-LABEL: @test_vmclr_m_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmclr_m_b2(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmclr_m_b4(size_t vl) { - return vmclr_m_b4(vl); + return __riscv_vmclr_m_b4(vl); } // CHECK-RV64-LABEL: @test_vmclr_m_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmclr_m_b4(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmclr_m_b8(size_t vl) { - return vmclr_m_b8(vl); + return __riscv_vmclr_m_b8(vl); } // CHECK-RV64-LABEL: @test_vmclr_m_b16( @@ -48,7 +48,7 @@ vbool8_t test_vmclr_m_b8(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmclr_m_b16(size_t vl) { - return vmclr_m_b16(vl); + return __riscv_vmclr_m_b16(vl); } // CHECK-RV64-LABEL: @test_vmclr_m_b32( @@ -57,7 +57,7 @@ vbool16_t test_vmclr_m_b16(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmclr_m_b32(size_t vl) { - return vmclr_m_b32(vl); + return __riscv_vmclr_m_b32(vl); } // CHECK-RV64-LABEL: @test_vmclr_m_b64( @@ -66,6 +66,6 @@ vbool32_t test_vmclr_m_b32(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmclr_m_b64(size_t vl) { - return vmclr_m_b64(vl); + return 
__riscv_vmclr_m_b64(vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c index 6ca7cd649401..07d9e82cdb0f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmerge_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_i8mf8(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8( @@ -22,7 +22,7 @@ vint8mf8_t test_vmerge_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmerge_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_i8mf8(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4( @@ -31,7 +31,7 @@ vint8mf8_t test_vmerge_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t mask, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmerge_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_i8mf4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4( @@ -40,7 +40,7 @@ vint8mf4_t test_vmerge_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmerge_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_i8mf4(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2( @@ -49,7 +49,7 @@ vint8mf4_t test_vmerge_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t mask, siz // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8mf2_t test_vmerge_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_i8mf2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2( @@ -58,7 +58,7 @@ vint8mf2_t test_vmerge_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmerge_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_i8mf2(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1( @@ -67,7 +67,7 @@ vint8mf2_t test_vmerge_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t mask, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmerge_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_i8m1(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1( @@ -76,7 +76,7 @@ vint8m1_t test_vmerge_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmerge_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_i8m1(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2( @@ -85,7 +85,7 @@ vint8m1_t test_vmerge_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t mask, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmerge_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_i8m2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2( @@ -94,7 +94,7 @@ vint8m2_t test_vmerge_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmerge_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { - return vmerge_vxm_i8m2(op1, op2, mask, 
vl); + return __riscv_vmerge_vxm_i8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4( @@ -103,7 +103,7 @@ vint8m2_t test_vmerge_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmerge_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { - return vmerge_vvm_i8m4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4( @@ -112,7 +112,7 @@ vint8m4_t test_vmerge_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmerge_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { - return vmerge_vxm_i8m4(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8( @@ -121,7 +121,7 @@ vint8m4_t test_vmerge_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t mask, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmerge_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { - return vmerge_vvm_i8m8(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8( @@ -130,7 +130,7 @@ vint8m8_t test_vmerge_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmerge_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { - return vmerge_vxm_i8m8(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4( @@ -139,7 +139,7 @@ vint8m8_t test_vmerge_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmerge_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_i16mf4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4( @@ -148,7 +148,7 @@ 
vint16mf4_t test_vmerge_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmerge_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_i16mf4(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2( @@ -157,7 +157,7 @@ vint16mf4_t test_vmerge_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmerge_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_i16mf2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2( @@ -166,7 +166,7 @@ vint16mf2_t test_vmerge_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmerge_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_i16mf2(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1( @@ -175,7 +175,7 @@ vint16mf2_t test_vmerge_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmerge_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_i16m1(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1( @@ -184,7 +184,7 @@ vint16m1_t test_vmerge_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmerge_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_i16m1(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2( @@ -193,7 +193,7 @@ vint16m1_t test_vmerge_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t mask, si // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmerge_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_i16m2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2( @@ -202,7 +202,7 @@ vint16m2_t test_vmerge_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmerge_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_i16m2(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4( @@ -211,7 +211,7 @@ vint16m2_t test_vmerge_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t mask, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmerge_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_i16m4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4( @@ -220,7 +220,7 @@ vint16m4_t test_vmerge_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmerge_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { - return vmerge_vxm_i16m4(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8( @@ -229,7 +229,7 @@ vint16m4_t test_vmerge_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t mask, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmerge_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { - return vmerge_vvm_i16m8(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8( @@ -238,7 +238,7 @@ vint16m8_t test_vmerge_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmerge_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t mask, 
size_t vl) { - return vmerge_vxm_i16m8(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2( @@ -247,7 +247,7 @@ vint16m8_t test_vmerge_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t mask, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmerge_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_i32mf2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2( @@ -256,7 +256,7 @@ vint32mf2_t test_vmerge_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmerge_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_i32mf2(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1( @@ -265,7 +265,7 @@ vint32mf2_t test_vmerge_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmerge_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_i32m1(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1( @@ -274,7 +274,7 @@ vint32m1_t test_vmerge_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmerge_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_i32m1(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2( @@ -283,7 +283,7 @@ vint32m1_t test_vmerge_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t mask, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmerge_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_i32m2(op1, op2, mask, vl); + return 
__riscv_vmerge_vvm_i32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2( @@ -292,7 +292,7 @@ vint32m2_t test_vmerge_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmerge_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_i32m2(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4( @@ -301,7 +301,7 @@ vint32m2_t test_vmerge_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t mask, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmerge_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_i32m4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4( @@ -310,7 +310,7 @@ vint32m4_t test_vmerge_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmerge_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_i32m4(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8( @@ -319,7 +319,7 @@ vint32m4_t test_vmerge_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t mask, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmerge_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_i32m8(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8( @@ -328,7 +328,7 @@ vint32m8_t test_vmerge_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmerge_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { - return vmerge_vxm_i32m8(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1( @@ -337,7 +337,7 @@ 
vint32m8_t test_vmerge_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t mask, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmerge_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_i64m1(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1( @@ -346,7 +346,7 @@ vint64m1_t test_vmerge_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmerge_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_i64m1(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2( @@ -355,7 +355,7 @@ vint64m1_t test_vmerge_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t mask, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmerge_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_i64m2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2( @@ -364,7 +364,7 @@ vint64m2_t test_vmerge_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmerge_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_i64m2(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4( @@ -373,7 +373,7 @@ vint64m2_t test_vmerge_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t mask, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmerge_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_i64m4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4( @@ -382,7 +382,7 @@ vint64m4_t test_vmerge_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t mask, // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m4_t test_vmerge_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_i64m4(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8( @@ -391,7 +391,7 @@ vint64m4_t test_vmerge_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t mask, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmerge_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_i64m8(op1, op2, mask, vl); + return __riscv_vmerge_vvm_i64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8( @@ -400,7 +400,7 @@ vint64m8_t test_vmerge_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmerge_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_i64m8(op1, op2, mask, vl); + return __riscv_vmerge_vxm_i64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8( @@ -409,7 +409,7 @@ vint64m8_t test_vmerge_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t mask, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmerge_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_u8mf8(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8( @@ -418,7 +418,7 @@ vuint8mf8_t test_vmerge_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmerge_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_u8mf8(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8mf8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4( @@ -427,7 +427,7 @@ vuint8mf8_t test_vmerge_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmerge_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) 
{ - return vmerge_vvm_u8mf4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4( @@ -436,7 +436,7 @@ vuint8mf4_t test_vmerge_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmerge_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_u8mf4(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2( @@ -445,7 +445,7 @@ vuint8mf4_t test_vmerge_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmerge_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_u8mf2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2( @@ -454,7 +454,7 @@ vuint8mf2_t test_vmerge_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmerge_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_u8mf2(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1( @@ -463,7 +463,7 @@ vuint8mf2_t test_vmerge_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmerge_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_u8m1(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1( @@ -472,7 +472,7 @@ vuint8m1_t test_vmerge_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmerge_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_u8m1(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8m1(op1, op2, mask, vl); } // 
CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2( @@ -481,7 +481,7 @@ vuint8m1_t test_vmerge_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmerge_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_u8m2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2( @@ -490,7 +490,7 @@ vuint8m2_t test_vmerge_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmerge_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { - return vmerge_vxm_u8m2(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4( @@ -499,7 +499,7 @@ vuint8m2_t test_vmerge_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmerge_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { - return vmerge_vvm_u8m4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4( @@ -508,7 +508,7 @@ vuint8m4_t test_vmerge_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmerge_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { - return vmerge_vxm_u8m4(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8( @@ -517,7 +517,7 @@ vuint8m4_t test_vmerge_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmerge_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { - return vmerge_vvm_u8m8(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8( @@ -526,7 +526,7 @@ vuint8m8_t test_vmerge_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, 
vbool1_t mask, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmerge_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { - return vmerge_vxm_u8m8(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4( @@ -535,7 +535,7 @@ vuint8m8_t test_vmerge_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmerge_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_u16mf4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4( @@ -544,7 +544,7 @@ vuint16mf4_t test_vmerge_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmerge_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_u16mf4(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2( @@ -553,7 +553,7 @@ vuint16mf4_t test_vmerge_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmerge_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_u16mf2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2( @@ -562,7 +562,7 @@ vuint16mf2_t test_vmerge_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmerge_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_u16mf2(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1( @@ -571,7 +571,7 @@ vuint16mf2_t test_vmerge_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vmerge_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_u16m1(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1( @@ -580,7 +580,7 @@ vuint16m1_t test_vmerge_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmerge_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_u16m1(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2( @@ -589,7 +589,7 @@ vuint16m1_t test_vmerge_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmerge_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_u16m2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2( @@ -598,7 +598,7 @@ vuint16m2_t test_vmerge_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmerge_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_u16m2(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4( @@ -607,7 +607,7 @@ vuint16m2_t test_vmerge_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmerge_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_u16m4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4( @@ -616,7 +616,7 @@ vuint16m4_t test_vmerge_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmerge_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { - return 
vmerge_vxm_u16m4(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8( @@ -625,7 +625,7 @@ vuint16m4_t test_vmerge_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmerge_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { - return vmerge_vvm_u16m8(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8( @@ -634,7 +634,7 @@ vuint16m8_t test_vmerge_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmerge_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { - return vmerge_vxm_u16m8(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2( @@ -643,7 +643,7 @@ vuint16m8_t test_vmerge_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmerge_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_u32mf2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2( @@ -652,7 +652,7 @@ vuint32mf2_t test_vmerge_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmerge_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_u32mf2(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1( @@ -661,7 +661,7 @@ vuint32mf2_t test_vmerge_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmerge_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_u32m1(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u32m1(op1, op2, 
mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1( @@ -670,7 +670,7 @@ vuint32m1_t test_vmerge_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmerge_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_u32m1(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2( @@ -679,7 +679,7 @@ vuint32m1_t test_vmerge_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmerge_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_u32m2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2( @@ -688,7 +688,7 @@ vuint32m2_t test_vmerge_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmerge_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_u32m2(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4( @@ -697,7 +697,7 @@ vuint32m2_t test_vmerge_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmerge_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_u32m4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4( @@ -706,7 +706,7 @@ vuint32m4_t test_vmerge_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmerge_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_u32m4(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8( @@ -715,7 +715,7 @@ vuint32m4_t 
test_vmerge_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmerge_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_u32m8(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8( @@ -724,7 +724,7 @@ vuint32m8_t test_vmerge_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmerge_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { - return vmerge_vxm_u32m8(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u32m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1( @@ -733,7 +733,7 @@ vuint32m8_t test_vmerge_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmerge_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_u64m1(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1( @@ -742,7 +742,7 @@ vuint64m1_t test_vmerge_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmerge_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_u64m1(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2( @@ -751,7 +751,7 @@ vuint64m1_t test_vmerge_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmerge_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_u64m2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2( @@ -760,7 +760,7 @@ vuint64m2_t test_vmerge_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t ma // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m2_t test_vmerge_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_u64m2(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4( @@ -769,7 +769,7 @@ vuint64m2_t test_vmerge_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmerge_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_u64m4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4( @@ -778,7 +778,7 @@ vuint64m4_t test_vmerge_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmerge_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_u64m4(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8( @@ -787,7 +787,7 @@ vuint64m4_t test_vmerge_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmerge_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_u64m8(op1, op2, mask, vl); + return __riscv_vmerge_vvm_u64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8( @@ -796,7 +796,7 @@ vuint64m8_t test_vmerge_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmerge_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_u64m8(op1, op2, mask, vl); + return __riscv_vmerge_vxm_u64m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4( @@ -805,7 +805,7 @@ vuint64m8_t test_vmerge_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t mask, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vmerge_vvm_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, 
vbool64_t mask, size_t vl) { - return vmerge_vvm_f16mf4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f16mf4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2( @@ -814,7 +814,7 @@ vfloat16mf4_t test_vmerge_vvm_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vmerge_vvm_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_f16mf2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f16mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1( @@ -823,7 +823,7 @@ vfloat16mf2_t test_vmerge_vvm_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_f16m1(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f16m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2( @@ -832,7 +832,7 @@ vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vmerge_vvm_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_f16m2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f16m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4( @@ -841,7 +841,7 @@ vfloat16m2_t test_vmerge_vvm_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vmerge_vvm_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_f16m4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f16m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8( @@ -850,7 +850,7 @@ vfloat16m4_t test_vmerge_vvm_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vmerge_vvm_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { - return vmerge_vvm_f16m8(op1, op2, 
mask, vl); + return __riscv_vmerge_vvm_f16m8(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2( @@ -859,7 +859,7 @@ vfloat16m8_t test_vmerge_vvm_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vmerge_vvm_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_f32mf2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f32mf2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1( @@ -868,7 +868,7 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vmerge_vvm_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_f32m1(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f32m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2( @@ -877,7 +877,7 @@ vfloat32m1_t test_vmerge_vvm_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vmerge_vvm_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_f32m2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f32m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4( @@ -886,7 +886,7 @@ vfloat32m2_t test_vmerge_vvm_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vmerge_vvm_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_f32m4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f32m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8( @@ -895,7 +895,7 @@ vfloat32m4_t test_vmerge_vvm_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vmerge_vvm_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_f32m8(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f32m8(op1, op2, mask, vl); 
} // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1( @@ -904,7 +904,7 @@ vfloat32m8_t test_vmerge_vvm_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vmerge_vvm_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_f64m1(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f64m1(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2( @@ -913,7 +913,7 @@ vfloat64m1_t test_vmerge_vvm_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vmerge_vvm_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_f64m2(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f64m2(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4( @@ -922,7 +922,7 @@ vfloat64m2_t test_vmerge_vvm_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vmerge_vvm_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_f64m4(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f64m4(op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8( @@ -931,6 +931,6 @@ vfloat64m4_t test_vmerge_vvm_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_f64m8(op1, op2, mask, vl); + return __riscv_vmerge_vvm_f64m8(op1, op2, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq.c index 723d4110238d..0fb3a192e1b4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfeq.c @@ -13,7 +13,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfeq_vv_f16mf4_b64(op1, op2, vl); + return __riscv_vmfeq_vv_f16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmfeq_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16mf4_b64(op1, op2, vl); + return __riscv_vmfeq_vf_f16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmfeq_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfeq_vv_f16mf2_b32(op1, op2, vl); + return __riscv_vmfeq_vv_f16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmfeq_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16mf2_b32(op1, op2, vl); + return __riscv_vmfeq_vf_f16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmfeq_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfeq_vv_f16m1_b16(op1, op2, vl); + return __riscv_vmfeq_vv_f16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmfeq_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m1_b16(op1, op2, vl); + return 
__riscv_vmfeq_vf_f16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmfeq_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfeq_vv_f16m2_b8(op1, op2, vl); + return __riscv_vmfeq_vv_f16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmfeq_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m2_b8(op1, op2, vl); + return __riscv_vmfeq_vf_f16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmfeq_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfeq_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfeq_vv_f16m4_b4(op1, op2, vl); + return __riscv_vmfeq_vv_f16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmfeq_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfeq_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m4_b4(op1, op2, vl); + return __riscv_vmfeq_vf_f16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmfeq_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfeq_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfeq_vv_f16m8_b2(op1, op2, vl); + return __riscv_vmfeq_vv_f16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmfeq_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t 
test_vmfeq_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m8_b2(op1, op2, vl); + return __riscv_vmfeq_vf_f16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64( @@ -121,7 +121,7 @@ vbool2_t test_vmfeq_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfeq_vv_f32mf2_b64(op1, op2, vl); + return __riscv_vmfeq_vv_f32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64( @@ -130,7 +130,7 @@ vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { - return vmfeq_vf_f32mf2_b64(op1, op2, vl); + return __riscv_vmfeq_vf_f32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32( @@ -139,7 +139,7 @@ vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfeq_vv_f32m1_b32(op1, op2, vl); + return __riscv_vmfeq_vv_f32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32( @@ -148,7 +148,7 @@ vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m1_b32(op1, op2, vl); + return __riscv_vmfeq_vf_f32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16( @@ -157,7 +157,7 @@ vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfeq_vv_f32m2_b16(op1, op2, vl); + return __riscv_vmfeq_vv_f32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmfeq_vf_f32m2_b16( @@ -166,7 +166,7 @@ vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m2_b16(op1, op2, vl); + return __riscv_vmfeq_vf_f32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8( @@ -175,7 +175,7 @@ vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfeq_vv_f32m4_b8(op1, op2, vl); + return __riscv_vmfeq_vv_f32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8( @@ -184,7 +184,7 @@ vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m4_b8(op1, op2, vl); + return __riscv_vmfeq_vf_f32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4( @@ -193,7 +193,7 @@ vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfeq_vv_f32m8_b4(op1, op2, vl); + return __riscv_vmfeq_vv_f32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4( @@ -202,7 +202,7 @@ vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m8_b4(op1, op2, vl); + return __riscv_vmfeq_vf_f32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64( @@ -211,7 +211,7 @@ vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) 
{ - return vmfeq_vv_f64m1_b64(op1, op2, vl); + return __riscv_vmfeq_vv_f64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64( @@ -220,7 +220,7 @@ vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m1_b64(op1, op2, vl); + return __riscv_vmfeq_vf_f64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32( @@ -229,7 +229,7 @@ vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfeq_vv_f64m2_b32(op1, op2, vl); + return __riscv_vmfeq_vv_f64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32( @@ -238,7 +238,7 @@ vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m2_b32(op1, op2, vl); + return __riscv_vmfeq_vf_f64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16( @@ -247,7 +247,7 @@ vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfeq_vv_f64m4_b16(op1, op2, vl); + return __riscv_vmfeq_vv_f64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16( @@ -256,7 +256,7 @@ vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m4_b16(op1, op2, vl); + return __riscv_vmfeq_vf_f64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8( @@ -265,7 +265,7 @@ vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t 
op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfeq_vv_f64m8_b8(op1, op2, vl); + return __riscv_vmfeq_vv_f64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8( @@ -274,7 +274,7 @@ vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m8_b8(op1, op2, vl); + return __riscv_vmfeq_vf_f64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf4_b64_m( @@ -283,7 +283,7 @@ vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfeq_vv_f16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64_m( @@ -292,7 +292,7 @@ vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32_m( @@ -301,7 +301,7 @@ vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfeq_vv_f16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32_m( @@ -310,7 +310,7 @@ vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t 
test_vmfeq_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16_m( @@ -319,7 +319,7 @@ vbool32_t test_vmfeq_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfeq_vv_f16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16_m( @@ -328,7 +328,7 @@ vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8_m( @@ -337,7 +337,7 @@ vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfeq_vv_f16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8_m( @@ -346,7 +346,7 @@ vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4_m( @@ -355,7 +355,7 @@ vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t 
test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfeq_vv_f16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4_m( @@ -364,7 +364,7 @@ vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2_m( @@ -373,7 +373,7 @@ vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfeq_vv_f16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2_m( @@ -382,7 +382,7 @@ vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m( @@ -391,7 +391,7 @@ vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfeq_vv_f32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m( @@ -400,7 +400,7 @@ vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t 
test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfeq_vf_f32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m( @@ -409,7 +409,7 @@ vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfeq_vv_f32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m( @@ -418,7 +418,7 @@ vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m( @@ -427,7 +427,7 @@ vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfeq_vv_f32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m( @@ -436,7 +436,7 @@ vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m( @@ -445,7 +445,7 @@ vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t 
test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfeq_vv_f32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m( @@ -454,7 +454,7 @@ vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m( @@ -463,7 +463,7 @@ vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfeq_vv_f32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m( @@ -472,7 +472,7 @@ vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m( @@ -481,7 +481,7 @@ vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfeq_vv_f64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m( @@ -490,7 +490,7 @@ vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, 
vfloat64m1_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m( @@ -499,7 +499,7 @@ vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfeq_vv_f64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m( @@ -508,7 +508,7 @@ vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m( @@ -517,7 +517,7 @@ vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfeq_vv_f64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m( @@ -526,7 +526,7 @@ vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_m( @@ -535,7 +535,7 @@ vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, 
vfloat64m8_t op2, size_t vl) { - return vmfeq_vv_f64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmfeq_vv_f64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m( @@ -544,6 +544,6 @@ vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmfeq_vf_f64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge.c index 1074cad92049..3bbfb865b6db 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfge.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfge_vv_f16mf4_b64(op1, op2, vl); + return __riscv_vmfge_vv_f16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmfge_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16mf4_b64(op1, op2, vl); + return __riscv_vmfge_vf_f16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmfge_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfge_vv_f16mf2_b32(op1, op2, vl); + return __riscv_vmfge_vv_f16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32( @@ -40,7 +40,7 @@ vbool32_t 
test_vmfge_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16mf2_b32(op1, op2, vl); + return __riscv_vmfge_vf_f16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmfge_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfge_vv_f16m1_b16(op1, op2, vl); + return __riscv_vmfge_vv_f16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmfge_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m1_b16(op1, op2, vl); + return __riscv_vmfge_vf_f16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmfge_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfge_vv_f16m2_b8(op1, op2, vl); + return __riscv_vmfge_vv_f16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmfge_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m2_b8(op1, op2, vl); + return __riscv_vmfge_vf_f16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmfge_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfge_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfge_vv_f16m4_b4(op1, 
op2, vl); + return __riscv_vmfge_vv_f16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmfge_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfge_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m4_b4(op1, op2, vl); + return __riscv_vmfge_vf_f16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmfge_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfge_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfge_vv_f16m8_b2(op1, op2, vl); + return __riscv_vmfge_vv_f16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmfge_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfge_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m8_b2(op1, op2, vl); + return __riscv_vmfge_vf_f16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64( @@ -121,7 +121,7 @@ vbool2_t test_vmfge_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfge_vv_f32mf2_b64(op1, op2, vl); + return __riscv_vmfge_vv_f32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64( @@ -130,7 +130,7 @@ vbool64_t test_vmfge_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { - return vmfge_vf_f32mf2_b64(op1, op2, vl); + return __riscv_vmfge_vf_f32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m1_b32( @@ -139,7 +139,7 @@ vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfge_vv_f32m1_b32(op1, op2, vl); + return __riscv_vmfge_vv_f32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32( @@ -148,7 +148,7 @@ vbool32_t test_vmfge_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { - return vmfge_vf_f32m1_b32(op1, op2, vl); + return __riscv_vmfge_vf_f32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m2_b16( @@ -157,7 +157,7 @@ vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfge_vv_f32m2_b16(op1, op2, vl); + return __riscv_vmfge_vv_f32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16( @@ -166,7 +166,7 @@ vbool16_t test_vmfge_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { - return vmfge_vf_f32m2_b16(op1, op2, vl); + return __riscv_vmfge_vf_f32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m4_b8( @@ -175,7 +175,7 @@ vbool16_t test_vmfge_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfge_vv_f32m4_b8(op1, op2, vl); + return __riscv_vmfge_vv_f32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8( @@ -184,7 +184,7 @@ vbool8_t test_vmfge_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { - return vmfge_vf_f32m4_b8(op1, op2, vl); + return __riscv_vmfge_vf_f32m4_b8(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmfge_vv_f32m8_b4( @@ -193,7 +193,7 @@ vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfge_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfge_vv_f32m8_b4(op1, op2, vl); + return __riscv_vmfge_vv_f32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4( @@ -202,7 +202,7 @@ vbool4_t test_vmfge_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { - return vmfge_vf_f32m8_b4(op1, op2, vl); + return __riscv_vmfge_vf_f32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m1_b64( @@ -211,7 +211,7 @@ vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfge_vv_f64m1_b64(op1, op2, vl); + return __riscv_vmfge_vv_f64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64( @@ -220,7 +220,7 @@ vbool64_t test_vmfge_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { - return vmfge_vf_f64m1_b64(op1, op2, vl); + return __riscv_vmfge_vf_f64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m2_b32( @@ -229,7 +229,7 @@ vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfge_vv_f64m2_b32(op1, op2, vl); + return __riscv_vmfge_vv_f64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32( @@ -238,7 +238,7 @@ vbool32_t test_vmfge_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t 
test_vmfge_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { - return vmfge_vf_f64m2_b32(op1, op2, vl); + return __riscv_vmfge_vf_f64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m4_b16( @@ -247,7 +247,7 @@ vbool32_t test_vmfge_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfge_vv_f64m4_b16(op1, op2, vl); + return __riscv_vmfge_vv_f64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16( @@ -256,7 +256,7 @@ vbool16_t test_vmfge_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { - return vmfge_vf_f64m4_b16(op1, op2, vl); + return __riscv_vmfge_vf_f64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m8_b8( @@ -265,7 +265,7 @@ vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfge_vv_f64m8_b8(op1, op2, vl); + return __riscv_vmfge_vv_f64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8( @@ -274,7 +274,7 @@ vbool8_t test_vmfge_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { - return vmfge_vf_f64m8_b8(op1, op2, vl); + return __riscv_vmfge_vf_f64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16mf4_b64_m( @@ -283,7 +283,7 @@ vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfge_vv_f16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f16mf4_b64_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64_m( @@ -292,7 +292,7 @@ vbool64_t test_vmfge_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32_m( @@ -301,7 +301,7 @@ vbool64_t test_vmfge_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfge_vv_f16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32_m( @@ -310,7 +310,7 @@ vbool32_t test_vmfge_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16_m( @@ -319,7 +319,7 @@ vbool32_t test_vmfge_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfge_vv_f16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16_m( @@ -328,7 +328,7 @@ vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f16m1_b16_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8_m( @@ -337,7 +337,7 @@ vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfge_vv_f16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8_m( @@ -346,7 +346,7 @@ vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4_m( @@ -355,7 +355,7 @@ vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfge_vv_f16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4_m( @@ -364,7 +364,7 @@ vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2_m( @@ -373,7 +373,7 @@ vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfge_vv_f16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2_m( 
@@ -382,7 +382,7 @@ vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_m( @@ -391,7 +391,7 @@ vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfge_vv_f32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_m( @@ -400,7 +400,7 @@ vbool64_t test_vmfge_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfge_vf_f32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m1_b32_m( @@ -409,7 +409,7 @@ vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfge_vv_f32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_m( @@ -418,7 +418,7 @@ vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vmfge_vf_f32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m2_b16_m( @@ -427,7 +427,7 
@@ vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfge_vv_f32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_m( @@ -436,7 +436,7 @@ vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vmfge_vf_f32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m4_b8_m( @@ -445,7 +445,7 @@ vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfge_vv_f32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_m( @@ -454,7 +454,7 @@ vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vmfge_vf_f32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m8_b4_m( @@ -463,7 +463,7 @@ vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfge_vv_f32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_m( @@ -472,7 +472,7 @@ vbool4_t 
test_vmfge_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vmfge_vf_f32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m1_b64_m( @@ -481,7 +481,7 @@ vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfge_vv_f64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_m( @@ -490,7 +490,7 @@ vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vmfge_vf_f64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m2_b32_m( @@ -499,7 +499,7 @@ vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfge_vv_f64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_m( @@ -508,7 +508,7 @@ vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vmfge_vf_f64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m4_b16_m( @@ -517,7 +517,7 @@ vbool32_t 
test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfge_vv_f64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_m( @@ -526,7 +526,7 @@ vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vmfge_vf_f64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m8_b8_m( @@ -535,7 +535,7 @@ vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfge_vv_f64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmfge_vv_f64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_m( @@ -544,6 +544,6 @@ vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vmfge_vf_f64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmfge_vf_f64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt.c index 4a13098a806f..6663a54a004b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfgt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t 
test_vmfgt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfgt_vv_f16mf4_b64(op1, op2, vl); + return __riscv_vmfgt_vv_f16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmfgt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16mf4_b64(op1, op2, vl); + return __riscv_vmfgt_vf_f16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmfgt_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfgt_vv_f16mf2_b32(op1, op2, vl); + return __riscv_vmfgt_vv_f16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmfgt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16mf2_b32(op1, op2, vl); + return __riscv_vmfgt_vf_f16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmfgt_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfgt_vv_f16m1_b16(op1, op2, vl); + return __riscv_vmfgt_vv_f16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmfgt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m1_b16(op1, op2, vl); + return __riscv_vmfgt_vf_f16m1_b16(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmfgt_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfgt_vv_f16m2_b8(op1, op2, vl); + return __riscv_vmfgt_vv_f16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmfgt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m2_b8(op1, op2, vl); + return __riscv_vmfgt_vf_f16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmfgt_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfgt_vv_f16m4_b4(op1, op2, vl); + return __riscv_vmfgt_vv_f16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmfgt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m4_b4(op1, op2, vl); + return __riscv_vmfgt_vf_f16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmfgt_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfgt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfgt_vv_f16m8_b2(op1, op2, vl); + return __riscv_vmfgt_vv_f16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmfgt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfgt_vf_f16m8_b2(vfloat16m8_t op1, 
_Float16 op2, size_t vl) { - return vmfgt_vf_f16m8_b2(op1, op2, vl); + return __riscv_vmfgt_vf_f16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64( @@ -121,7 +121,7 @@ vbool2_t test_vmfgt_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfgt_vv_f32mf2_b64(op1, op2, vl); + return __riscv_vmfgt_vv_f32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64( @@ -130,7 +130,7 @@ vbool64_t test_vmfgt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { - return vmfgt_vf_f32mf2_b64(op1, op2, vl); + return __riscv_vmfgt_vf_f32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m1_b32( @@ -139,7 +139,7 @@ vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfgt_vv_f32m1_b32(op1, op2, vl); + return __riscv_vmfgt_vv_f32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32( @@ -148,7 +148,7 @@ vbool32_t test_vmfgt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m1_b32(op1, op2, vl); + return __riscv_vmfgt_vf_f32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m2_b16( @@ -157,7 +157,7 @@ vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfgt_vv_f32m2_b16(op1, op2, vl); + return __riscv_vmfgt_vv_f32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16( @@ -166,7 +166,7 @@ vbool16_t 
test_vmfgt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m2_b16(op1, op2, vl); + return __riscv_vmfgt_vf_f32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m4_b8( @@ -175,7 +175,7 @@ vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfgt_vv_f32m4_b8(op1, op2, vl); + return __riscv_vmfgt_vv_f32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8( @@ -184,7 +184,7 @@ vbool8_t test_vmfgt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m4_b8(op1, op2, vl); + return __riscv_vmfgt_vf_f32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m8_b4( @@ -193,7 +193,7 @@ vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfgt_vv_f32m8_b4(op1, op2, vl); + return __riscv_vmfgt_vv_f32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4( @@ -202,7 +202,7 @@ vbool4_t test_vmfgt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m8_b4(op1, op2, vl); + return __riscv_vmfgt_vf_f32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m1_b64( @@ -211,7 +211,7 @@ vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfgt_vv_f64m1_b64(op1, op2, vl); + return 
__riscv_vmfgt_vv_f64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64( @@ -220,7 +220,7 @@ vbool64_t test_vmfgt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m1_b64(op1, op2, vl); + return __riscv_vmfgt_vf_f64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m2_b32( @@ -229,7 +229,7 @@ vbool64_t test_vmfgt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfgt_vv_f64m2_b32(op1, op2, vl); + return __riscv_vmfgt_vv_f64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32( @@ -238,7 +238,7 @@ vbool32_t test_vmfgt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m2_b32(op1, op2, vl); + return __riscv_vmfgt_vf_f64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m4_b16( @@ -247,7 +247,7 @@ vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfgt_vv_f64m4_b16(op1, op2, vl); + return __riscv_vmfgt_vv_f64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16( @@ -256,7 +256,7 @@ vbool16_t test_vmfgt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m4_b16(op1, op2, vl); + return __riscv_vmfgt_vf_f64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m8_b8( @@ -265,7 +265,7 @@ vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vbool8_t test_vmfgt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfgt_vv_f64m8_b8(op1, op2, vl); + return __riscv_vmfgt_vv_f64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8( @@ -274,7 +274,7 @@ vbool8_t test_vmfgt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m8_b8(op1, op2, vl); + return __riscv_vmfgt_vf_f64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf4_b64_m( @@ -283,7 +283,7 @@ vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfgt_vv_f16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64_m( @@ -292,7 +292,7 @@ vbool64_t test_vmfgt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32_m( @@ -301,7 +301,7 @@ vbool64_t test_vmfgt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfgt_vv_f16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32_m( @@ -310,7 +310,7 @@ vbool32_t test_vmfgt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t 
op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16_m( @@ -319,7 +319,7 @@ vbool32_t test_vmfgt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfgt_vv_f16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16_m( @@ -328,7 +328,7 @@ vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8_m( @@ -337,7 +337,7 @@ vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfgt_vv_f16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8_m( @@ -346,7 +346,7 @@ vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4_m( @@ -355,7 +355,7 @@ vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t 
vl) { - return vmfgt_vv_f16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4_m( @@ -364,7 +364,7 @@ vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2_m( @@ -373,7 +373,7 @@ vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfgt_vv_f16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2_m( @@ -382,7 +382,7 @@ vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_m( @@ -391,7 +391,7 @@ vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfgt_vv_f32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_m( @@ -400,7 +400,7 @@ vbool64_t test_vmfgt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return 
vmfgt_vf_f32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m1_b32_m( @@ -409,7 +409,7 @@ vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfgt_vv_f32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_m( @@ -418,7 +418,7 @@ vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m2_b16_m( @@ -427,7 +427,7 @@ vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfgt_vv_f32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_m( @@ -436,7 +436,7 @@ vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m4_b8_m( @@ -445,7 +445,7 @@ vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return 
vmfgt_vv_f32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_m( @@ -454,7 +454,7 @@ vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m8_b4_m( @@ -463,7 +463,7 @@ vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfgt_vv_f32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_m( @@ -472,7 +472,7 @@ vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m1_b64_m( @@ -481,7 +481,7 @@ vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfgt_vv_f64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_m( @@ -490,7 +490,7 @@ vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m1_b64_m(mask, op1, op2, vl); + 
return __riscv_vmfgt_vf_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m2_b32_m( @@ -499,7 +499,7 @@ vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfgt_vv_f64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_m( @@ -508,7 +508,7 @@ vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m4_b16_m( @@ -517,7 +517,7 @@ vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfgt_vv_f64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmfgt_vv_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_m( @@ -526,7 +526,7 @@ vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m8_b8_m( @@ -535,7 +535,7 @@ vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfgt_vv_f64m8_b8_m(mask, op1, op2, vl); + return 
__riscv_vmfgt_vv_f64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_m( @@ -544,6 +544,6 @@ vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmfgt_vf_f64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle.c index 3c595196fb0c..85b1c4efd1aa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfle.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfle_vv_f16mf4_b64(op1, op2, vl); + return __riscv_vmfle_vv_f16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmfle_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16mf4_b64(op1, op2, vl); + return __riscv_vmfle_vf_f16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmfle_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfle_vv_f16mf2_b32(op1, op2, vl); + return __riscv_vmfle_vv_f16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmfle_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-NEXT: ret 
[[TMP0]] // vbool32_t test_vmfle_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16mf2_b32(op1, op2, vl); + return __riscv_vmfle_vf_f16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmfle_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfle_vv_f16m1_b16(op1, op2, vl); + return __riscv_vmfle_vv_f16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmfle_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m1_b16(op1, op2, vl); + return __riscv_vmfle_vf_f16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmfle_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfle_vv_f16m2_b8(op1, op2, vl); + return __riscv_vmfle_vv_f16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmfle_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m2_b8(op1, op2, vl); + return __riscv_vmfle_vf_f16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmfle_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfle_vv_f16m4_b4(op1, op2, vl); + return __riscv_vmfle_vv_f16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmfle_vf_f16m4_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmfle_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m4_b4(op1, op2, vl); + return __riscv_vmfle_vf_f16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmfle_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfle_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfle_vv_f16m8_b2(op1, op2, vl); + return __riscv_vmfle_vv_f16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmfle_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfle_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m8_b2(op1, op2, vl); + return __riscv_vmfle_vf_f16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64( @@ -121,7 +121,7 @@ vbool2_t test_vmfle_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfle_vv_f32mf2_b64(op1, op2, vl); + return __riscv_vmfle_vv_f32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64( @@ -130,7 +130,7 @@ vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { - return vmfle_vf_f32mf2_b64(op1, op2, vl); + return __riscv_vmfle_vf_f32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32( @@ -139,7 +139,7 @@ vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, 
vfloat32m1_t op2, size_t vl) { - return vmfle_vv_f32m1_b32(op1, op2, vl); + return __riscv_vmfle_vv_f32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32( @@ -148,7 +148,7 @@ vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { - return vmfle_vf_f32m1_b32(op1, op2, vl); + return __riscv_vmfle_vf_f32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16( @@ -157,7 +157,7 @@ vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfle_vv_f32m2_b16(op1, op2, vl); + return __riscv_vmfle_vv_f32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16( @@ -166,7 +166,7 @@ vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { - return vmfle_vf_f32m2_b16(op1, op2, vl); + return __riscv_vmfle_vf_f32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8( @@ -175,7 +175,7 @@ vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfle_vv_f32m4_b8(op1, op2, vl); + return __riscv_vmfle_vv_f32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8( @@ -184,7 +184,7 @@ vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { - return vmfle_vf_f32m4_b8(op1, op2, vl); + return __riscv_vmfle_vf_f32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4( @@ -193,7 +193,7 @@ vbool8_t 
test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfle_vv_f32m8_b4(op1, op2, vl); + return __riscv_vmfle_vv_f32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4( @@ -202,7 +202,7 @@ vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { - return vmfle_vf_f32m8_b4(op1, op2, vl); + return __riscv_vmfle_vf_f32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64( @@ -211,7 +211,7 @@ vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfle_vv_f64m1_b64(op1, op2, vl); + return __riscv_vmfle_vv_f64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64( @@ -220,7 +220,7 @@ vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { - return vmfle_vf_f64m1_b64(op1, op2, vl); + return __riscv_vmfle_vf_f64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32( @@ -229,7 +229,7 @@ vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfle_vv_f64m2_b32(op1, op2, vl); + return __riscv_vmfle_vv_f64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32( @@ -238,7 +238,7 @@ vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { - return vmfle_vf_f64m2_b32(op1, op2, 
vl); + return __riscv_vmfle_vf_f64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16( @@ -247,7 +247,7 @@ vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfle_vv_f64m4_b16(op1, op2, vl); + return __riscv_vmfle_vv_f64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16( @@ -256,7 +256,7 @@ vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { - return vmfle_vf_f64m4_b16(op1, op2, vl); + return __riscv_vmfle_vf_f64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8( @@ -265,7 +265,7 @@ vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfle_vv_f64m8_b8(op1, op2, vl); + return __riscv_vmfle_vv_f64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8( @@ -274,7 +274,7 @@ vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { - return vmfle_vf_f64m8_b8(op1, op2, vl); + return __riscv_vmfle_vf_f64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16mf4_b64_m( @@ -283,7 +283,7 @@ vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfle_vv_f16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64_m( @@ -292,7 +292,7 @@ vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t 
mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32_m( @@ -301,7 +301,7 @@ vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfle_vv_f16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32_m( @@ -310,7 +310,7 @@ vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16_m( @@ -319,7 +319,7 @@ vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfle_vv_f16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16_m( @@ -328,7 +328,7 @@ vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8_m( @@ -337,7 +337,7 @@ vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t 
mask, vfloat16m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfle_vv_f16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8_m( @@ -346,7 +346,7 @@ vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4_m( @@ -355,7 +355,7 @@ vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfle_vv_f16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4_m( @@ -364,7 +364,7 @@ vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2_m( @@ -373,7 +373,7 @@ vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfle_vv_f16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2_m( @@ -382,7 +382,7 @@ vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m( @@ -391,7 +391,7 @@ vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfle_vv_f32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m( @@ -400,7 +400,7 @@ vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfle_vf_f32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m( @@ -409,7 +409,7 @@ vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfle_vv_f32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m( @@ -418,7 +418,7 @@ vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vmfle_vf_f32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m( @@ -427,7 +427,7 @@ vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: 
ret [[TMP0]] // vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfle_vv_f32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m( @@ -436,7 +436,7 @@ vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vmfle_vf_f32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m( @@ -445,7 +445,7 @@ vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfle_vv_f32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m( @@ -454,7 +454,7 @@ vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vmfle_vf_f32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m( @@ -463,7 +463,7 @@ vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfle_vv_f32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m( @@ -472,7 +472,7 @@ vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t 
test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vmfle_vf_f32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m( @@ -481,7 +481,7 @@ vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfle_vv_f64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m( @@ -490,7 +490,7 @@ vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vmfle_vf_f64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m( @@ -499,7 +499,7 @@ vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfle_vv_f64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m( @@ -508,7 +508,7 @@ vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vmfle_vf_f64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m( @@ -517,7 +517,7 @@ vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t 
test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfle_vv_f64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m( @@ -526,7 +526,7 @@ vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vmfle_vf_f64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m( @@ -535,7 +535,7 @@ vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfle_vv_f64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmfle_vv_f64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m( @@ -544,6 +544,6 @@ vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vmfle_vf_f64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmfle_vf_f64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt.c index 44417baa4beb..7ea09a087875 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmflt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmflt_vv_f16mf4_b64(op1, op2, vl); + return 
__riscv_vmflt_vv_f16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmflt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16mf4_b64(op1, op2, vl); + return __riscv_vmflt_vf_f16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmflt_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmflt_vv_f16mf2_b32(op1, op2, vl); + return __riscv_vmflt_vv_f16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmflt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16mf2_b32(op1, op2, vl); + return __riscv_vmflt_vf_f16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmflt_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmflt_vv_f16m1_b16(op1, op2, vl); + return __riscv_vmflt_vv_f16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmflt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m1_b16(op1, op2, vl); + return __riscv_vmflt_vf_f16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmflt_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmflt_vv_f16m2_b8(op1, op2, vl); + return __riscv_vmflt_vv_f16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmflt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m2_b8(op1, op2, vl); + return __riscv_vmflt_vf_f16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmflt_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmflt_vv_f16m4_b4(op1, op2, vl); + return __riscv_vmflt_vv_f16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmflt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m4_b4(op1, op2, vl); + return __riscv_vmflt_vf_f16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmflt_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmflt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmflt_vv_f16m8_b2(op1, op2, vl); + return __riscv_vmflt_vv_f16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmflt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmflt_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m8_b2(op1, op2, vl); + return __riscv_vmflt_vf_f16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmflt_vv_f32mf2_b64( @@ -121,7 +121,7 @@ vbool2_t test_vmflt_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmflt_vv_f32mf2_b64(op1, op2, vl); + return __riscv_vmflt_vv_f32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64( @@ -130,7 +130,7 @@ vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { - return vmflt_vf_f32mf2_b64(op1, op2, vl); + return __riscv_vmflt_vf_f32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32( @@ -139,7 +139,7 @@ vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmflt_vv_f32m1_b32(op1, op2, vl); + return __riscv_vmflt_vv_f32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32( @@ -148,7 +148,7 @@ vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { - return vmflt_vf_f32m1_b32(op1, op2, vl); + return __riscv_vmflt_vf_f32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16( @@ -157,7 +157,7 @@ vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmflt_vv_f32m2_b16(op1, op2, vl); + return __riscv_vmflt_vv_f32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16( @@ -166,7 +166,7 @@ vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t 
test_vmflt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { - return vmflt_vf_f32m2_b16(op1, op2, vl); + return __riscv_vmflt_vf_f32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8( @@ -175,7 +175,7 @@ vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmflt_vv_f32m4_b8(op1, op2, vl); + return __riscv_vmflt_vv_f32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8( @@ -184,7 +184,7 @@ vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { - return vmflt_vf_f32m4_b8(op1, op2, vl); + return __riscv_vmflt_vf_f32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4( @@ -193,7 +193,7 @@ vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmflt_vv_f32m8_b4(op1, op2, vl); + return __riscv_vmflt_vv_f32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4( @@ -202,7 +202,7 @@ vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { - return vmflt_vf_f32m8_b4(op1, op2, vl); + return __riscv_vmflt_vf_f32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64( @@ -211,7 +211,7 @@ vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmflt_vv_f64m1_b64(op1, op2, vl); + return __riscv_vmflt_vv_f64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64( @@ -220,7 +220,7 @@ 
vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { - return vmflt_vf_f64m1_b64(op1, op2, vl); + return __riscv_vmflt_vf_f64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32( @@ -229,7 +229,7 @@ vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmflt_vv_f64m2_b32(op1, op2, vl); + return __riscv_vmflt_vv_f64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32( @@ -238,7 +238,7 @@ vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { - return vmflt_vf_f64m2_b32(op1, op2, vl); + return __riscv_vmflt_vf_f64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16( @@ -247,7 +247,7 @@ vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmflt_vv_f64m4_b16(op1, op2, vl); + return __riscv_vmflt_vv_f64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16( @@ -256,7 +256,7 @@ vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { - return vmflt_vf_f64m4_b16(op1, op2, vl); + return __riscv_vmflt_vf_f64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8( @@ -265,7 +265,7 @@ vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return 
vmflt_vv_f64m8_b8(op1, op2, vl); + return __riscv_vmflt_vv_f64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8( @@ -274,7 +274,7 @@ vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { - return vmflt_vf_f64m8_b8(op1, op2, vl); + return __riscv_vmflt_vf_f64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16mf4_b64_m( @@ -283,7 +283,7 @@ vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmflt_vv_f16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmflt_vv_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64_m( @@ -292,7 +292,7 @@ vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmflt_vf_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32_m( @@ -301,7 +301,7 @@ vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmflt_vv_f16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmflt_vv_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32_m( @@ -310,7 +310,7 @@ vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16mf2_b32_m(mask, op1, op2, vl); + return 
__riscv_vmflt_vf_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16_m( @@ -319,7 +319,7 @@ vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmflt_vv_f16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmflt_vv_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16_m( @@ -328,7 +328,7 @@ vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmflt_vf_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8_m( @@ -337,7 +337,7 @@ vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmflt_vv_f16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmflt_vv_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8_m( @@ -346,7 +346,7 @@ vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmflt_vf_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4_m( @@ -355,7 +355,7 @@ vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmflt_vv_f16m4_b4_m(mask, op1, op2, vl); + return 
__riscv_vmflt_vv_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4_m( @@ -364,7 +364,7 @@ vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmflt_vf_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2_m( @@ -373,7 +373,7 @@ vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmflt_vv_f16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmflt_vv_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2_m( @@ -382,7 +382,7 @@ vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmflt_vf_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m( @@ -391,7 +391,7 @@ vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmflt_vv_f32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmflt_vv_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m( @@ -400,7 +400,7 @@ vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vmflt_vf_f32mf2_b64_m(mask, op1, op2, vl); + return 
__riscv_vmflt_vf_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m( @@ -409,7 +409,7 @@ vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmflt_vv_f32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmflt_vv_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m( @@ -418,7 +418,7 @@ vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vmflt_vf_f32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmflt_vf_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m( @@ -427,7 +427,7 @@ vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmflt_vv_f32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmflt_vv_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m( @@ -436,7 +436,7 @@ vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vmflt_vf_f32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmflt_vf_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m( @@ -445,7 +445,7 @@ vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmflt_vv_f32m4_b8_m(mask, op1, op2, vl); + return 
__riscv_vmflt_vv_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m( @@ -454,7 +454,7 @@ vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vmflt_vf_f32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmflt_vf_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m( @@ -463,7 +463,7 @@ vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmflt_vv_f32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmflt_vv_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m( @@ -472,7 +472,7 @@ vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return vmflt_vf_f32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmflt_vf_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m( @@ -481,7 +481,7 @@ vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmflt_vv_f64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmflt_vv_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m( @@ -490,7 +490,7 @@ vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vmflt_vf_f64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmflt_vf_f64m1_b64_m(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m( @@ -499,7 +499,7 @@ vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmflt_vv_f64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmflt_vv_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m( @@ -508,7 +508,7 @@ vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vmflt_vf_f64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmflt_vf_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m( @@ -517,7 +517,7 @@ vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmflt_vv_f64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmflt_vv_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m( @@ -526,7 +526,7 @@ vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vmflt_vf_f64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmflt_vf_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m( @@ -535,7 +535,7 @@ vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmflt_vv_f64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmflt_vv_f64m8_b8_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m( @@ -544,6 +544,6 @@ vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vmflt_vf_f64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmflt_vf_f64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne.c index 09b35196c58d..44a3019cf09d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmfne.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfne_vv_f16mf4_b64(op1, op2, vl); + return __riscv_vmfne_vv_f16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmfne_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16mf4_b64(op1, op2, vl); + return __riscv_vmfne_vf_f16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmfne_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfne_vv_f16mf2_b32(op1, op2, vl); + return __riscv_vmfne_vv_f16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmfne_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vf_f16mf2_b32(vfloat16mf2_t 
op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16mf2_b32(op1, op2, vl); + return __riscv_vmfne_vf_f16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmfne_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfne_vv_f16m1_b16(op1, op2, vl); + return __riscv_vmfne_vv_f16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmfne_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m1_b16(op1, op2, vl); + return __riscv_vmfne_vf_f16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmfne_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfne_vv_f16m2_b8(op1, op2, vl); + return __riscv_vmfne_vv_f16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmfne_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m2_b8(op1, op2, vl); + return __riscv_vmfne_vf_f16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmfne_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfne_vv_f16m4_b4(op1, op2, vl); + return __riscv_vmfne_vv_f16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4( @@ -94,7 +94,7 @@ vbool4_t 
test_vmfne_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m4_b4(op1, op2, vl); + return __riscv_vmfne_vf_f16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmfne_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfne_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfne_vv_f16m8_b2(op1, op2, vl); + return __riscv_vmfne_vv_f16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmfne_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfne_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m8_b2(op1, op2, vl); + return __riscv_vmfne_vf_f16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64( @@ -121,7 +121,7 @@ vbool2_t test_vmfne_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfne_vv_f32mf2_b64(op1, op2, vl); + return __riscv_vmfne_vv_f32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64( @@ -130,7 +130,7 @@ vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { - return vmfne_vf_f32mf2_b64(op1, op2, vl); + return __riscv_vmfne_vf_f32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32( @@ -139,7 +139,7 @@ vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return 
vmfne_vv_f32m1_b32(op1, op2, vl); + return __riscv_vmfne_vv_f32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32( @@ -148,7 +148,7 @@ vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { - return vmfne_vf_f32m1_b32(op1, op2, vl); + return __riscv_vmfne_vf_f32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16( @@ -157,7 +157,7 @@ vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfne_vv_f32m2_b16(op1, op2, vl); + return __riscv_vmfne_vv_f32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16( @@ -166,7 +166,7 @@ vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { - return vmfne_vf_f32m2_b16(op1, op2, vl); + return __riscv_vmfne_vf_f32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8( @@ -175,7 +175,7 @@ vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfne_vv_f32m4_b8(op1, op2, vl); + return __riscv_vmfne_vv_f32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8( @@ -184,7 +184,7 @@ vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { - return vmfne_vf_f32m4_b8(op1, op2, vl); + return __riscv_vmfne_vf_f32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4( @@ -193,7 +193,7 @@ vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) 
{ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfne_vv_f32m8_b4(op1, op2, vl); + return __riscv_vmfne_vv_f32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4( @@ -202,7 +202,7 @@ vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { - return vmfne_vf_f32m8_b4(op1, op2, vl); + return __riscv_vmfne_vf_f32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64( @@ -211,7 +211,7 @@ vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfne_vv_f64m1_b64(op1, op2, vl); + return __riscv_vmfne_vv_f64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64( @@ -220,7 +220,7 @@ vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { - return vmfne_vf_f64m1_b64(op1, op2, vl); + return __riscv_vmfne_vf_f64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32( @@ -229,7 +229,7 @@ vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfne_vv_f64m2_b32(op1, op2, vl); + return __riscv_vmfne_vv_f64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32( @@ -238,7 +238,7 @@ vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { - return vmfne_vf_f64m2_b32(op1, op2, vl); + return __riscv_vmfne_vf_f64m2_b32(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16( @@ -247,7 +247,7 @@ vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfne_vv_f64m4_b16(op1, op2, vl); + return __riscv_vmfne_vv_f64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16( @@ -256,7 +256,7 @@ vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { - return vmfne_vf_f64m4_b16(op1, op2, vl); + return __riscv_vmfne_vf_f64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8( @@ -265,7 +265,7 @@ vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfne_vv_f64m8_b8(op1, op2, vl); + return __riscv_vmfne_vv_f64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8( @@ -274,7 +274,7 @@ vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { - return vmfne_vf_f64m8_b8(op1, op2, vl); + return __riscv_vmfne_vf_f64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16mf4_b64_m( @@ -283,7 +283,7 @@ vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfne_vv_f16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64_m( @@ -292,7 +292,7 @@ vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32_m( @@ -301,7 +301,7 @@ vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfne_vv_f16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32_m( @@ -310,7 +310,7 @@ vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16_m( @@ -319,7 +319,7 @@ vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfne_vv_f16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16_m( @@ -328,7 +328,7 @@ vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8_m( @@ -337,7 +337,7 @@ vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 o // CHECK-RV64-NEXT: ret 
[[TMP0]] // vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfne_vv_f16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8_m( @@ -346,7 +346,7 @@ vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4_m( @@ -355,7 +355,7 @@ vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfne_vv_f16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4_m( @@ -364,7 +364,7 @@ vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2_m( @@ -373,7 +373,7 @@ vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfne_vv_f16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2_m( @@ -382,7 +382,7 @@ vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t 
test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m( @@ -391,7 +391,7 @@ vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfne_vv_f32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m( @@ -400,7 +400,7 @@ vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfne_vf_f32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_m( @@ -409,7 +409,7 @@ vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfne_vv_f32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m( @@ -418,7 +418,7 @@ vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return vmfne_vf_f32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m( @@ -427,7 +427,7 @@ vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t 
test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfne_vv_f32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m( @@ -436,7 +436,7 @@ vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return vmfne_vf_f32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m( @@ -445,7 +445,7 @@ vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfne_vv_f32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m( @@ -454,7 +454,7 @@ vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return vmfne_vf_f32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m( @@ -463,7 +463,7 @@ vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfne_vv_f32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m( @@ -472,7 +472,7 @@ vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, 
vfloat32m8_t op1, float op2, size_t vl) { - return vmfne_vf_f32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m( @@ -481,7 +481,7 @@ vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfne_vv_f64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m( @@ -490,7 +490,7 @@ vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return vmfne_vf_f64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m( @@ -499,7 +499,7 @@ vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfne_vv_f64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_m( @@ -508,7 +508,7 @@ vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return vmfne_vf_f64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m( @@ -517,7 +517,7 @@ vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, 
vfloat64m4_t op2, size_t vl) { - return vmfne_vv_f64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m( @@ -526,7 +526,7 @@ vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return vmfne_vf_f64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m( @@ -535,7 +535,7 @@ vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfne_vv_f64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmfne_vv_f64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m( @@ -544,6 +544,6 @@ vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return vmfne_vf_f64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmfne_vf_f64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmin.c index dc160308dc8c..daec44572ddf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmin.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmin_vv_i8mf8(op1, op2, vl); + return __riscv_vmin_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8( @@ -21,7 
+21,7 @@ vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf8(op1, op2, vl); + return __riscv_vmin_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmin_vv_i8mf4(op1, op2, vl); + return __riscv_vmin_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf4(op1, op2, vl); + return __riscv_vmin_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmin_vv_i8mf2(op1, op2, vl); + return __riscv_vmin_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf2(op1, op2, vl); + return __riscv_vmin_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmin_vv_i8m1(op1, op2, vl); + return __riscv_vmin_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t 
test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m1(op1, op2, vl); + return __riscv_vmin_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmin_vv_i8m2(op1, op2, vl); + return __riscv_vmin_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m2(op1, op2, vl); + return __riscv_vmin_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmin_vv_i8m4(op1, op2, vl); + return __riscv_vmin_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m4(op1, op2, vl); + return __riscv_vmin_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmin_vv_i8m8(op1, op2, vl); + return __riscv_vmin_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m8(op1, op2, vl); + return __riscv_vmin_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmin_vv_i16mf4(op1, op2, vl); + return __riscv_vmin_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16mf4(op1, op2, vl); + return __riscv_vmin_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmin_vv_i16mf2(op1, op2, vl); + return __riscv_vmin_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16mf2(op1, op2, vl); + return __riscv_vmin_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmin_vv_i16m1(op1, op2, vl); + return __riscv_vmin_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m1(op1, op2, vl); + return __riscv_vmin_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmin_vv_i16m2(op1, op2, vl); + return __riscv_vmin_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m2(op1, op2, vl); + return __riscv_vmin_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmin_vv_i16m4(op1, op2, vl); + return __riscv_vmin_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m4(op1, op2, vl); + return __riscv_vmin_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmin_vv_i16m8(op1, op2, vl); + return __riscv_vmin_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m8(op1, op2, vl); + return __riscv_vmin_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmin_vv_i32mf2(op1, op2, vl); + return __riscv_vmin_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32mf2(op1, op2, vl); + return __riscv_vmin_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmin_vv_i32m1(op1, op2, vl); + return __riscv_vmin_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m1(op1, op2, vl); + return __riscv_vmin_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmin_vv_i32m2(op1, op2, vl); + return __riscv_vmin_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t 
vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m2(op1, op2, vl); + return __riscv_vmin_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmin_vv_i32m4(op1, op2, vl); + return __riscv_vmin_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m4(op1, op2, vl); + return __riscv_vmin_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmin_vv_i32m8(op1, op2, vl); + return __riscv_vmin_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m8(op1, op2, vl); + return __riscv_vmin_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmin_vv_i64m1(op1, op2, vl); + return __riscv_vmin_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m1(op1, op2, vl); + return __riscv_vmin_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmin_vv_i64m2(op1, op2, vl); + return __riscv_vmin_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m2(op1, op2, vl); + return __riscv_vmin_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmin_vv_i64m4(op1, op2, vl); + return __riscv_vmin_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m4(op1, op2, vl); + return __riscv_vmin_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmin_vv_i64m8(op1, op2, vl); + return __riscv_vmin_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m8(op1, op2, vl); + return __riscv_vmin_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m( @@ -408,7 +408,7 @@ vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmin_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m( @@ -426,7 +426,7 @@ vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmin_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m( @@ -435,7 +435,7 @@ vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m( @@ -444,7 +444,7 @@ vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmin_vv_i8mf2_m(mask, op1, op2, vl); + 
return __riscv_vmin_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m( @@ -453,7 +453,7 @@ vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m( @@ -462,7 +462,7 @@ vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmin_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m( @@ -480,7 +480,7 @@ vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmin_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m( @@ -489,7 +489,7 @@ vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m( @@ -498,7 +498,7 @@ vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, 
vint8m2_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmin_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m( @@ -507,7 +507,7 @@ vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m( @@ -516,7 +516,7 @@ vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmin_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m( @@ -525,7 +525,7 @@ vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m( @@ -534,7 +534,7 @@ vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmin_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m( @@ -543,7 +543,7 @@ vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t 
op2, size_t vl) { - return vmin_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m( @@ -552,7 +552,7 @@ vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmin_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m( @@ -561,7 +561,7 @@ vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m( @@ -570,7 +570,7 @@ vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmin_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m( @@ -579,7 +579,7 @@ vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m( @@ -588,7 +588,7 @@ vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmin_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i16m2_m(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m( @@ -597,7 +597,7 @@ vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m( @@ -606,7 +606,7 @@ vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmin_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m( @@ -615,7 +615,7 @@ vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m( @@ -624,7 +624,7 @@ vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmin_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m( @@ -633,7 +633,7 @@ vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m( @@ -642,7 +642,7 @@ vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t 
op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmin_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m( @@ -651,7 +651,7 @@ vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m( @@ -660,7 +660,7 @@ vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmin_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m( @@ -669,7 +669,7 @@ vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m( @@ -678,7 +678,7 @@ vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmin_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m( @@ -687,7 +687,7 @@ vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, 
vint32m2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m( @@ -696,7 +696,7 @@ vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmin_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m( @@ -705,7 +705,7 @@ vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m( @@ -714,7 +714,7 @@ vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmin_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m( @@ -723,7 +723,7 @@ vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m( @@ -732,7 +732,7 @@ vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmin_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i64m1_m(mask, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m( @@ -741,7 +741,7 @@ vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m( @@ -750,7 +750,7 @@ vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmin_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m( @@ -759,7 +759,7 @@ vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m( @@ -768,7 +768,7 @@ vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmin_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m( @@ -777,7 +777,7 @@ vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m( @@ -786,7 +786,7 @@ vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, 
vint64m4_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmin_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vmin_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m( @@ -795,6 +795,6 @@ vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vmin_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vminu.c index 4fb6f7267814..b1944defaa34 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vminu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vminu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vminu_vv_u8mf8(op1, op2, vl); + return __riscv_vminu_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf8(op1, op2, vl); + return __riscv_vminu_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vminu_vv_u8mf4(op1, op2, vl); + return __riscv_vminu_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4( @@ 
-39,7 +39,7 @@ vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf4(op1, op2, vl); + return __riscv_vminu_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2( @@ -48,7 +48,7 @@ vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vminu_vv_u8mf2(op1, op2, vl); + return __riscv_vminu_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf2(op1, op2, vl); + return __riscv_vminu_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vminu_vv_u8m1(op1, op2, vl); + return __riscv_vminu_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m1(op1, op2, vl); + return __riscv_vminu_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m2( @@ -84,7 +84,7 @@ vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vminu_vv_u8m2(op1, op2, vl); + return __riscv_vminu_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vminu_vx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m2(op1, op2, vl); + return __riscv_vminu_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vminu_vv_u8m4(op1, op2, vl); + return __riscv_vminu_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m4( @@ -111,7 +111,7 @@ vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m4(op1, op2, vl); + return __riscv_vminu_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m8( @@ -120,7 +120,7 @@ vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vminu_vv_u8m8(op1, op2, vl); + return __riscv_vminu_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m8( @@ -129,7 +129,7 @@ vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m8(op1, op2, vl); + return __riscv_vminu_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4( @@ -138,7 +138,7 @@ vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vminu_vv_u16mf4(op1, op2, vl); + return __riscv_vminu_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vminu_vx_u16mf4( @@ -147,7 +147,7 @@ vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16mf4(op1, op2, vl); + return __riscv_vminu_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2( @@ -156,7 +156,7 @@ vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vminu_vv_u16mf2(op1, op2, vl); + return __riscv_vminu_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2( @@ -165,7 +165,7 @@ vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16mf2(op1, op2, vl); + return __riscv_vminu_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m1( @@ -174,7 +174,7 @@ vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vminu_vv_u16m1(op1, op2, vl); + return __riscv_vminu_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m1( @@ -183,7 +183,7 @@ vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m1(op1, op2, vl); + return __riscv_vminu_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m2( @@ -192,7 +192,7 @@ vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return 
vminu_vv_u16m2(op1, op2, vl); + return __riscv_vminu_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m2( @@ -201,7 +201,7 @@ vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m2(op1, op2, vl); + return __riscv_vminu_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m4( @@ -210,7 +210,7 @@ vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vminu_vv_u16m4(op1, op2, vl); + return __riscv_vminu_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m4( @@ -219,7 +219,7 @@ vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m4(op1, op2, vl); + return __riscv_vminu_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m8( @@ -228,7 +228,7 @@ vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vminu_vv_u16m8(op1, op2, vl); + return __riscv_vminu_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m8( @@ -237,7 +237,7 @@ vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m8(op1, op2, vl); + return __riscv_vminu_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2( @@ -246,7 +246,7 @@ vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vminu_vv_u32mf2(op1, op2, vl); + return __riscv_vminu_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32mf2(op1, op2, vl); + return __riscv_vminu_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vminu_vv_u32m1(op1, op2, vl); + return __riscv_vminu_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m1(op1, op2, vl); + return __riscv_vminu_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m2( @@ -282,7 +282,7 @@ vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vminu_vv_u32m2(op1, op2, vl); + return __riscv_vminu_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m2( @@ -291,7 +291,7 @@ vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m2(op1, op2, vl); + return __riscv_vminu_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m4( @@ -300,7 +300,7 @@ vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, 
uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vminu_vv_u32m4(op1, op2, vl); + return __riscv_vminu_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m4( @@ -309,7 +309,7 @@ vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m4(op1, op2, vl); + return __riscv_vminu_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m8( @@ -318,7 +318,7 @@ vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vminu_vv_u32m8(op1, op2, vl); + return __riscv_vminu_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m8( @@ -327,7 +327,7 @@ vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m8(op1, op2, vl); + return __riscv_vminu_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m1( @@ -336,7 +336,7 @@ vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vminu_vv_u64m1(op1, op2, vl); + return __riscv_vminu_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m1( @@ -345,7 +345,7 @@ vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m1(op1, op2, vl); + return __riscv_vminu_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m2( @@ -354,7 +354,7 
@@ vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vminu_vv_u64m2(op1, op2, vl); + return __riscv_vminu_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m2( @@ -363,7 +363,7 @@ vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m2(op1, op2, vl); + return __riscv_vminu_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m4( @@ -372,7 +372,7 @@ vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vminu_vv_u64m4(op1, op2, vl); + return __riscv_vminu_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m4( @@ -381,7 +381,7 @@ vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m4(op1, op2, vl); + return __riscv_vminu_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m8( @@ -390,7 +390,7 @@ vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vminu_vv_u64m8(op1, op2, vl); + return __riscv_vminu_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m8(op1, op2, vl); + return __riscv_vminu_vx_u64m8(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vminu_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m( @@ -417,7 +417,7 @@ vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m( @@ -426,7 +426,7 @@ vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vminu_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m( @@ -435,7 +435,7 @@ vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m( @@ -444,7 +444,7 @@ vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vminu_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m( @@ -453,7 +453,7 @@ vuint8mf2_t 
test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m( @@ -462,7 +462,7 @@ vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vminu_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m( @@ -471,7 +471,7 @@ vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m( @@ -480,7 +480,7 @@ vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vminu_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m( @@ -489,7 +489,7 @@ vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m( @@ -498,7 +498,7 @@ vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vminu_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m( @@ -507,7 +507,7 @@ vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m( @@ -516,7 +516,7 @@ vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vminu_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m( @@ -525,7 +525,7 @@ vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m( @@ -534,7 +534,7 @@ vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vminu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m( @@ -543,7 +543,7 @@ vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return 
vminu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m( @@ -552,7 +552,7 @@ vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vminu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m( @@ -561,7 +561,7 @@ vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m( @@ -570,7 +570,7 @@ vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vminu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m( @@ -579,7 +579,7 @@ vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m( @@ -588,7 +588,7 @@ vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vminu_vv_u16m2_m(mask, op1, op2, vl); + return 
__riscv_vminu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m( @@ -597,7 +597,7 @@ vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m( @@ -606,7 +606,7 @@ vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vminu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m( @@ -615,7 +615,7 @@ vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m( @@ -624,7 +624,7 @@ vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vminu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m( @@ -633,7 +633,7 @@ vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m( @@ 
-642,7 +642,7 @@ vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vminu_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m( @@ -651,7 +651,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m( @@ -660,7 +660,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vminu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m( @@ -669,7 +669,7 @@ vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m( @@ -678,7 +678,7 @@ vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vminu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m( @@ -687,7 +687,7 @@ vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t 
op1, vuint32m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m( @@ -696,7 +696,7 @@ vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vminu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m( @@ -705,7 +705,7 @@ vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m( @@ -714,7 +714,7 @@ vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vminu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m( @@ -723,7 +723,7 @@ vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m( @@ -732,7 +732,7 @@ vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t 
mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vminu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m( @@ -741,7 +741,7 @@ vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m( @@ -750,7 +750,7 @@ vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vminu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m( @@ -759,7 +759,7 @@ vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m( @@ -768,7 +768,7 @@ vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vminu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m( @@ -777,7 +777,7 @@ vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m4_m(mask, op1, 
op2, vl); + return __riscv_vminu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m( @@ -786,7 +786,7 @@ vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vminu_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vminu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vminu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmmv.c index b318cba0f11a..4108bdced270 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmmv.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmmv_m_b1(vbool1_t op1, size_t vl) { - return vmmv_m_b1(op1, vl); + return __riscv_vmmv_m_b1(op1, vl); } // CHECK-RV64-LABEL: @test_vmmv_m_b2( @@ -22,7 +22,7 @@ vbool1_t test_vmmv_m_b1(vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmmv_m_b2(vbool2_t op1, size_t vl) { - return vmmv_m_b2(op1, vl); + return __riscv_vmmv_m_b2(op1, vl); } // CHECK-RV64-LABEL: @test_vmmv_m_b4( @@ -31,7 +31,7 @@ vbool2_t test_vmmv_m_b2(vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmmv_m_b4(vbool4_t op1, size_t vl) { - return vmmv_m_b4(op1, vl); + return __riscv_vmmv_m_b4(op1, vl); } // CHECK-RV64-LABEL: @test_vmmv_m_b8( @@ -40,7 +40,7 @@ 
vbool4_t test_vmmv_m_b4(vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmmv_m_b8(vbool8_t op1, size_t vl) { - return vmmv_m_b8(op1, vl); + return __riscv_vmmv_m_b8(op1, vl); } // CHECK-RV64-LABEL: @test_vmmv_m_b16( @@ -49,7 +49,7 @@ vbool8_t test_vmmv_m_b8(vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmmv_m_b16(vbool16_t op1, size_t vl) { - return vmmv_m_b16(op1, vl); + return __riscv_vmmv_m_b16(op1, vl); } // CHECK-RV64-LABEL: @test_vmmv_m_b32( @@ -58,7 +58,7 @@ vbool16_t test_vmmv_m_b16(vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmmv_m_b32(vbool32_t op1, size_t vl) { - return vmmv_m_b32(op1, vl); + return __riscv_vmmv_m_b32(op1, vl); } // CHECK-RV64-LABEL: @test_vmmv_m_b64( @@ -67,6 +67,6 @@ vbool32_t test_vmmv_m_b32(vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmmv_m_b64(vbool64_t op1, size_t vl) { - return vmmv_m_b64(op1, vl); + return __riscv_vmmv_m_b64(op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmnand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmnand.c index b9a33f67e52f..4550b0619873 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmnand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmnand.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmnand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return vmnand_mm_b1(op1, op2, vl); + return __riscv_vmnand_mm_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmnand_mm_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmnand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmnand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return vmnand_mm_b2(op1, op2, vl); + return __riscv_vmnand_mm_b2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmnand_mm_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmnand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmnand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return vmnand_mm_b4(op1, op2, vl); + return __riscv_vmnand_mm_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmnand_mm_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmnand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmnand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return vmnand_mm_b8(op1, op2, vl); + return __riscv_vmnand_mm_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmnand_mm_b16( @@ -48,7 +48,7 @@ vbool8_t test_vmnand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmnand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return vmnand_mm_b16(op1, op2, vl); + return __riscv_vmnand_mm_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmnand_mm_b32( @@ -57,7 +57,7 @@ vbool16_t test_vmnand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmnand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return vmnand_mm_b32(op1, op2, vl); + return __riscv_vmnand_mm_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmnand_mm_b64( @@ -66,6 +66,6 @@ vbool32_t test_vmnand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmnand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return vmnand_mm_b64(op1, op2, vl); + return __riscv_vmnand_mm_b64(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmnor.c index f8180878162b..f6c938d9d2b6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmnor.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmnor.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return vmnor_mm_b1(op1, op2, vl); + return __riscv_vmnor_mm_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmnor_mm_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return vmnor_mm_b2(op1, op2, vl); + return __riscv_vmnor_mm_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmnor_mm_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return vmnor_mm_b4(op1, op2, vl); + return __riscv_vmnor_mm_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmnor_mm_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return vmnor_mm_b8(op1, op2, vl); + return __riscv_vmnor_mm_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmnor_mm_b16( @@ -48,7 +48,7 @@ vbool8_t test_vmnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return vmnor_mm_b16(op1, op2, vl); + return __riscv_vmnor_mm_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmnor_mm_b32( @@ -57,7 +57,7 @@ vbool16_t test_vmnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return vmnor_mm_b32(op1, op2, vl); + return __riscv_vmnor_mm_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmnor_mm_b64( @@ -66,6 +66,6 @@ vbool32_t test_vmnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return vmnor_mm_b64(op1, op2, vl); + return __riscv_vmnor_mm_b64(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmnot.c index 13eccf1ea3dd..ad9541715361 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmnot.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmnot.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmnot_m_b1(vbool1_t op1, size_t vl) { - return vmnot_m_b1(op1, vl); + return __riscv_vmnot_m_b1(op1, vl); } // CHECK-RV64-LABEL: @test_vmnot_m_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmnot_m_b1(vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmnot_m_b2(vbool2_t op1, size_t vl) { - return vmnot_m_b2(op1, vl); + return __riscv_vmnot_m_b2(op1, vl); } // CHECK-RV64-LABEL: @test_vmnot_m_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmnot_m_b2(vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmnot_m_b4(vbool4_t op1, size_t vl) { - return vmnot_m_b4(op1, vl); + return __riscv_vmnot_m_b4(op1, vl); } // CHECK-RV64-LABEL: @test_vmnot_m_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmnot_m_b4(vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmnot_m_b8(vbool8_t op1, size_t vl) { - return vmnot_m_b8(op1, vl); + return __riscv_vmnot_m_b8(op1, vl); } // CHECK-RV64-LABEL: @test_vmnot_m_b16( @@ -48,7 +48,7 @@ vbool8_t test_vmnot_m_b8(vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmnot_m_b16(vbool16_t op1, size_t vl) { - return vmnot_m_b16(op1, vl); + return __riscv_vmnot_m_b16(op1, vl); } // CHECK-RV64-LABEL: @test_vmnot_m_b32( @@ -57,7 +57,7 @@ vbool16_t test_vmnot_m_b16(vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vbool32_t test_vmnot_m_b32(vbool32_t op1, size_t vl) { - return vmnot_m_b32(op1, vl); + return __riscv_vmnot_m_b32(op1, vl); } // CHECK-RV64-LABEL: @test_vmnot_m_b64( @@ -66,6 +66,6 @@ vbool32_t test_vmnot_m_b32(vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmnot_m_b64(vbool64_t op1, size_t vl) { - return vmnot_m_b64(op1, vl); + return __riscv_vmnot_m_b64(op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmor.c index 8533d795d646..a65fb4c08862 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmor.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return vmor_mm_b1(op1, op2, vl); + return __riscv_vmor_mm_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmor_mm_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return vmor_mm_b2(op1, op2, vl); + return __riscv_vmor_mm_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmor_mm_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return vmor_mm_b4(op1, op2, vl); + return __riscv_vmor_mm_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmor_mm_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return vmor_mm_b8(op1, op2, vl); + return __riscv_vmor_mm_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmor_mm_b16( @@ -48,7 +48,7 
@@ vbool8_t test_vmor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return vmor_mm_b16(op1, op2, vl); + return __riscv_vmor_mm_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmor_mm_b32( @@ -57,7 +57,7 @@ vbool16_t test_vmor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return vmor_mm_b32(op1, op2, vl); + return __riscv_vmor_mm_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmor_mm_b64( @@ -66,6 +66,6 @@ vbool32_t test_vmor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return vmor_mm_b64(op1, op2, vl); + return __riscv_vmor_mm_b64(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmorn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmorn.c index 4bd8ee0376a3..5321d5685496 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmorn.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmorn.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmorn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return vmorn_mm_b1(op1, op2, vl); + return __riscv_vmorn_mm_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmorn_mm_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmorn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmorn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return vmorn_mm_b2(op1, op2, vl); + return __riscv_vmorn_mm_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmorn_mm_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmorn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t 
test_vmorn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return vmorn_mm_b4(op1, op2, vl); + return __riscv_vmorn_mm_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmorn_mm_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmorn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmorn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return vmorn_mm_b8(op1, op2, vl); + return __riscv_vmorn_mm_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmorn_mm_b16( @@ -48,7 +48,7 @@ vbool8_t test_vmorn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmorn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return vmorn_mm_b16(op1, op2, vl); + return __riscv_vmorn_mm_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmorn_mm_b32( @@ -57,7 +57,7 @@ vbool16_t test_vmorn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmorn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return vmorn_mm_b32(op1, op2, vl); + return __riscv_vmorn_mm_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmorn_mm_b64( @@ -66,6 +66,6 @@ vbool32_t test_vmorn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmorn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return vmorn_mm_b64(op1, op2, vl); + return __riscv_vmorn_mm_b64(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsbc.c index c6a6289b2793..239a77c01505 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsbc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsbc.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return 
vmsbc_vvm_i8mf8_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i8mf8_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf8_b64( @@ -21,7 +21,7 @@ vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, vbool64_t bor // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vxm_i8mf8_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i8mf8_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf8_b64( @@ -30,7 +30,7 @@ vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, vbool64_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsbc_vv_i8mf8_b64(op1, op2, vl); + return __riscv_vmsbc_vv_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf8_b64( @@ -39,7 +39,7 @@ vbool64_t test_vmsbc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsbc_vx_i8mf8_b64(op1, op2, vl); + return __riscv_vmsbc_vx_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf4_b32( @@ -48,7 +48,7 @@ vbool64_t test_vmsbc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vvm_i8mf4_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i8mf4_b32(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf4_b32( @@ -57,7 +57,7 @@ vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, vbool32_t bor // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vxm_i8mf4_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i8mf4_b32(op1, op2, borrowin, 
vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf4_b32( @@ -66,7 +66,7 @@ vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, vbool32_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsbc_vv_i8mf4_b32(op1, op2, vl); + return __riscv_vmsbc_vv_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf4_b32( @@ -75,7 +75,7 @@ vbool32_t test_vmsbc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsbc_vx_i8mf4_b32(op1, op2, vl); + return __riscv_vmsbc_vx_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf2_b16( @@ -84,7 +84,7 @@ vbool32_t test_vmsbc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vvm_i8mf2_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i8mf2_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf2_b16( @@ -93,7 +93,7 @@ vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, vbool16_t bor // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vxm_i8mf2_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i8mf2_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf2_b16( @@ -102,7 +102,7 @@ vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, vbool16_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsbc_vv_i8mf2_b16(op1, op2, vl); + return __riscv_vmsbc_vv_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf2_b16( @@ -111,7 +111,7 @@ vbool16_t test_vmsbc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsbc_vx_i8mf2_b16(op1, op2, vl); + return __riscv_vmsbc_vx_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m1_b8( @@ -120,7 +120,7 @@ vbool16_t test_vmsbc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vvm_i8m1_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i8m1_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m1_b8( @@ -129,7 +129,7 @@ vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vxm_i8m1_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i8m1_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m1_b8( @@ -138,7 +138,7 @@ vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t borrowin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsbc_vv_i8m1_b8(op1, op2, vl); + return __riscv_vmsbc_vv_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m1_b8( @@ -147,7 +147,7 @@ vbool8_t test_vmsbc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return vmsbc_vx_i8m1_b8(op1, op2, vl); + return __riscv_vmsbc_vx_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m2_b4( @@ -156,7 +156,7 @@ vbool8_t test_vmsbc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl) { - return vmsbc_vvm_i8m2_b4(op1, op2, borrowin, vl); + 
return __riscv_vmsbc_vvm_i8m2_b4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m2_b4( @@ -165,7 +165,7 @@ vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl) { - return vmsbc_vxm_i8m2_b4(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i8m2_b4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m2_b4( @@ -174,7 +174,7 @@ vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t borrowin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsbc_vv_i8m2_b4(op1, op2, vl); + return __riscv_vmsbc_vv_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m2_b4( @@ -183,7 +183,7 @@ vbool4_t test_vmsbc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return vmsbc_vx_i8m2_b4(op1, op2, vl); + return __riscv_vmsbc_vx_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m4_b2( @@ -192,7 +192,7 @@ vbool4_t test_vmsbc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl) { - return vmsbc_vvm_i8m4_b2(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i8m4_b2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m4_b2( @@ -201,7 +201,7 @@ vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl) { - return vmsbc_vxm_i8m4_b2(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i8m4_b2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m4_b2( @@ -210,7 +210,7 @@ vbool2_t 
test_vmsbc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t borrowin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsbc_vv_i8m4_b2(op1, op2, vl); + return __riscv_vmsbc_vv_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m4_b2( @@ -219,7 +219,7 @@ vbool2_t test_vmsbc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return vmsbc_vx_i8m4_b2(op1, op2, vl); + return __riscv_vmsbc_vx_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m8_b1( @@ -228,7 +228,7 @@ vbool2_t test_vmsbc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl) { - return vmsbc_vvm_i8m8_b1(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i8m8_b1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m8_b1( @@ -237,7 +237,7 @@ vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl) { - return vmsbc_vxm_i8m8_b1(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i8m8_b1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m8_b1( @@ -246,7 +246,7 @@ vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t borrowin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsbc_vv_i8m8_b1(op1, op2, vl); + return __riscv_vmsbc_vv_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m8_b1( @@ -255,7 +255,7 @@ vbool1_t test_vmsbc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return 
vmsbc_vx_i8m8_b1(op1, op2, vl); + return __riscv_vmsbc_vx_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf4_b64( @@ -264,7 +264,7 @@ vbool1_t test_vmsbc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vvm_i16mf4_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i16mf4_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf4_b64( @@ -273,7 +273,7 @@ vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, vbool64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vxm_i16mf4_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i16mf4_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf4_b64( @@ -282,7 +282,7 @@ vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, vbool64_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsbc_vv_i16mf4_b64(op1, op2, vl); + return __riscv_vmsbc_vv_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf4_b64( @@ -291,7 +291,7 @@ vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsbc_vx_i16mf4_b64(op1, op2, vl); + return __riscv_vmsbc_vx_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf2_b32( @@ -300,7 +300,7 @@ vbool64_t test_vmsbc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vvm_i16mf2_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i16mf2_b32(op1, op2, 
borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf2_b32( @@ -309,7 +309,7 @@ vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, vbool32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vxm_i16mf2_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i16mf2_b32(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf2_b32( @@ -318,7 +318,7 @@ vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, vbool32_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsbc_vv_i16mf2_b32(op1, op2, vl); + return __riscv_vmsbc_vv_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf2_b32( @@ -327,7 +327,7 @@ vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsbc_vx_i16mf2_b32(op1, op2, vl); + return __riscv_vmsbc_vx_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m1_b16( @@ -336,7 +336,7 @@ vbool32_t test_vmsbc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vvm_i16m1_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i16m1_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m1_b16( @@ -345,7 +345,7 @@ vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, vbool16_t bor // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vxm_i16m1_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i16m1_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m1_b16( @@ 
-354,7 +354,7 @@ vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, vbool16_t borrow // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsbc_vv_i16m1_b16(op1, op2, vl); + return __riscv_vmsbc_vv_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m1_b16( @@ -363,7 +363,7 @@ vbool16_t test_vmsbc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return vmsbc_vx_i16m1_b16(op1, op2, vl); + return __riscv_vmsbc_vx_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m2_b8( @@ -372,7 +372,7 @@ vbool16_t test_vmsbc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vvm_i16m2_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i16m2_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m2_b8( @@ -381,7 +381,7 @@ vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, vbool8_t borrow // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vxm_i16m2_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i16m2_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m2_b8( @@ -390,7 +390,7 @@ vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsbc_vv_i16m2_b8(op1, op2, vl); + return __riscv_vmsbc_vv_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m2_b8( @@ -399,7 +399,7 @@ vbool8_t test_vmsbc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t 
test_vmsbc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return vmsbc_vx_i16m2_b8(op1, op2, vl); + return __riscv_vmsbc_vx_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m4_b4( @@ -408,7 +408,7 @@ vbool8_t test_vmsbc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl) { - return vmsbc_vvm_i16m4_b4(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i16m4_b4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m4_b4( @@ -417,7 +417,7 @@ vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, vbool4_t borrow // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl) { - return vmsbc_vxm_i16m4_b4(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i16m4_b4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m4_b4( @@ -426,7 +426,7 @@ vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsbc_vv_i16m4_b4(op1, op2, vl); + return __riscv_vmsbc_vv_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m4_b4( @@ -435,7 +435,7 @@ vbool4_t test_vmsbc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return vmsbc_vx_i16m4_b4(op1, op2, vl); + return __riscv_vmsbc_vx_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m8_b2( @@ -444,7 +444,7 @@ vbool4_t test_vmsbc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl) { - return vmsbc_vvm_i16m8_b2(op1, op2, borrowin, vl); + return 
__riscv_vmsbc_vvm_i16m8_b2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m8_b2( @@ -453,7 +453,7 @@ vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, vbool2_t borrow // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl) { - return vmsbc_vxm_i16m8_b2(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i16m8_b2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m8_b2( @@ -462,7 +462,7 @@ vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsbc_vv_i16m8_b2(op1, op2, vl); + return __riscv_vmsbc_vv_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m8_b2( @@ -471,7 +471,7 @@ vbool2_t test_vmsbc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return vmsbc_vx_i16m8_b2(op1, op2, vl); + return __riscv_vmsbc_vx_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32mf2_b64( @@ -480,7 +480,7 @@ vbool2_t test_vmsbc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vvm_i32mf2_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i32mf2_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32mf2_b64( @@ -489,7 +489,7 @@ vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, vbool64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vxm_i32mf2_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i32mf2_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: 
@test_vmsbc_vv_i32mf2_b64( @@ -498,7 +498,7 @@ vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, vbool64_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsbc_vv_i32mf2_b64(op1, op2, vl); + return __riscv_vmsbc_vv_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i32mf2_b64( @@ -507,7 +507,7 @@ vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsbc_vx_i32mf2_b64(op1, op2, vl); + return __riscv_vmsbc_vx_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m1_b32( @@ -516,7 +516,7 @@ vbool64_t test_vmsbc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vvm_i32m1_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i32m1_b32(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m1_b32( @@ -525,7 +525,7 @@ vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, vbool32_t bor // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vxm_i32m1_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i32m1_b32(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m1_b32( @@ -534,7 +534,7 @@ vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, vbool32_t borrow // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsbc_vv_i32m1_b32(op1, op2, vl); + return __riscv_vmsbc_vv_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m1_b32( @@ -543,7 +543,7 @@ vbool32_t test_vmsbc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return vmsbc_vx_i32m1_b32(op1, op2, vl); + return __riscv_vmsbc_vx_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m2_b16( @@ -552,7 +552,7 @@ vbool32_t test_vmsbc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vvm_i32m2_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i32m2_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m2_b16( @@ -561,7 +561,7 @@ vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, vbool16_t bor // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vxm_i32m2_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i32m2_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m2_b16( @@ -570,7 +570,7 @@ vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, vbool16_t borrow // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsbc_vv_i32m2_b16(op1, op2, vl); + return __riscv_vmsbc_vv_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m2_b16( @@ -579,7 +579,7 @@ vbool16_t test_vmsbc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return vmsbc_vx_i32m2_b16(op1, op2, vl); + return __riscv_vmsbc_vx_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m4_b8( @@ -588,7 +588,7 @@ vbool16_t test_vmsbc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, 
size_t vl) { - return vmsbc_vvm_i32m4_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i32m4_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m4_b8( @@ -597,7 +597,7 @@ vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, vbool8_t borrow // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vxm_i32m4_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i32m4_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m4_b8( @@ -606,7 +606,7 @@ vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsbc_vv_i32m4_b8(op1, op2, vl); + return __riscv_vmsbc_vv_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m4_b8( @@ -615,7 +615,7 @@ vbool8_t test_vmsbc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return vmsbc_vx_i32m4_b8(op1, op2, vl); + return __riscv_vmsbc_vx_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m8_b4( @@ -624,7 +624,7 @@ vbool8_t test_vmsbc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl) { - return vmsbc_vvm_i32m8_b4(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i32m8_b4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m8_b4( @@ -633,7 +633,7 @@ vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, vbool4_t borrow // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl) { - return vmsbc_vxm_i32m8_b4(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i32m8_b4(op1, op2, 
borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m8_b4( @@ -642,7 +642,7 @@ vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsbc_vv_i32m8_b4(op1, op2, vl); + return __riscv_vmsbc_vv_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m8_b4( @@ -651,7 +651,7 @@ vbool4_t test_vmsbc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return vmsbc_vx_i32m8_b4(op1, op2, vl); + return __riscv_vmsbc_vx_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m1_b64( @@ -660,7 +660,7 @@ vbool4_t test_vmsbc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vvm_i64m1_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i64m1_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m1_b64( @@ -669,7 +669,7 @@ vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, vbool64_t bor // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vxm_i64m1_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i64m1_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m1_b64( @@ -678,7 +678,7 @@ vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, vbool64_t borrow // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsbc_vv_i64m1_b64(op1, op2, vl); + return __riscv_vmsbc_vv_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m1_b64( @@ -687,7 +687,7 @@ vbool64_t test_vmsbc_vv_i64m1_b64(vint64m1_t op1, 
vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return vmsbc_vx_i64m1_b64(op1, op2, vl); + return __riscv_vmsbc_vx_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m2_b32( @@ -696,7 +696,7 @@ vbool64_t test_vmsbc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vvm_i64m2_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i64m2_b32(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m2_b32( @@ -705,7 +705,7 @@ vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, vbool32_t bor // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vxm_i64m2_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i64m2_b32(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m2_b32( @@ -714,7 +714,7 @@ vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, vbool32_t borrow // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsbc_vv_i64m2_b32(op1, op2, vl); + return __riscv_vmsbc_vv_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m2_b32( @@ -723,7 +723,7 @@ vbool32_t test_vmsbc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return vmsbc_vx_i64m2_b32(op1, op2, vl); + return __riscv_vmsbc_vx_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m4_b16( @@ -732,7 +732,7 @@ vbool32_t test_vmsbc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, 
vbool16_t borrowin, size_t vl) { - return vmsbc_vvm_i64m4_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i64m4_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m4_b16( @@ -741,7 +741,7 @@ vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, vbool16_t bor // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vxm_i64m4_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_i64m4_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m4_b16( @@ -750,7 +750,7 @@ vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, vbool16_t borrow // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsbc_vv_i64m4_b16(op1, op2, vl); + return __riscv_vmsbc_vv_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m4_b16( @@ -759,7 +759,7 @@ vbool16_t test_vmsbc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return vmsbc_vx_i64m4_b16(op1, op2, vl); + return __riscv_vmsbc_vx_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m8_b8( @@ -768,7 +768,7 @@ vbool16_t test_vmsbc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vvm_i64m8_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_i64m8_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m8_b8( @@ -777,7 +777,7 @@ vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, vbool8_t borrow // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vxm_i64m8_b8(op1, op2, borrowin, vl); + return 
__riscv_vmsbc_vxm_i64m8_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m8_b8( @@ -786,7 +786,7 @@ vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsbc_vv_i64m8_b8(op1, op2, vl); + return __riscv_vmsbc_vv_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m8_b8( @@ -795,7 +795,7 @@ vbool8_t test_vmsbc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return vmsbc_vx_i64m8_b8(op1, op2, vl); + return __riscv_vmsbc_vx_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf8_b64( @@ -804,7 +804,7 @@ vbool8_t test_vmsbc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vvm_u8mf8_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u8mf8_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf8_b64( @@ -813,7 +813,7 @@ vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vxm_u8mf8_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u8mf8_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf8_b64( @@ -822,7 +822,7 @@ vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, vbool64_t borro // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsbc_vv_u8mf8_b64(op1, op2, vl); + return __riscv_vmsbc_vv_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf8_b64( @@ -831,7 +831,7 @@ vbool64_t 
test_vmsbc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsbc_vx_u8mf8_b64(op1, op2, vl); + return __riscv_vmsbc_vx_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf4_b32( @@ -840,7 +840,7 @@ vbool64_t test_vmsbc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vvm_u8mf4_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u8mf4_b32(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf4_b32( @@ -849,7 +849,7 @@ vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vxm_u8mf4_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u8mf4_b32(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf4_b32( @@ -858,7 +858,7 @@ vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, vbool32_t borro // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsbc_vv_u8mf4_b32(op1, op2, vl); + return __riscv_vmsbc_vv_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf4_b32( @@ -867,7 +867,7 @@ vbool32_t test_vmsbc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsbc_vx_u8mf4_b32(op1, op2, vl); + return __riscv_vmsbc_vx_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf2_b16( @@ -876,7 +876,7 @@ vbool32_t test_vmsbc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t 
test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vvm_u8mf2_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u8mf2_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf2_b16( @@ -885,7 +885,7 @@ vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vxm_u8mf2_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u8mf2_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf2_b16( @@ -894,7 +894,7 @@ vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, vbool16_t borro // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsbc_vv_u8mf2_b16(op1, op2, vl); + return __riscv_vmsbc_vv_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf2_b16( @@ -903,7 +903,7 @@ vbool16_t test_vmsbc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsbc_vx_u8mf2_b16(op1, op2, vl); + return __riscv_vmsbc_vx_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m1_b8( @@ -912,7 +912,7 @@ vbool16_t test_vmsbc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vvm_u8m1_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u8m1_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m1_b8( @@ -921,7 +921,7 @@ vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl) { - 
return vmsbc_vxm_u8m1_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u8m1_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m1_b8( @@ -930,7 +930,7 @@ vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsbc_vv_u8m1_b8(op1, op2, vl); + return __riscv_vmsbc_vv_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m1_b8( @@ -939,7 +939,7 @@ vbool8_t test_vmsbc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsbc_vx_u8m1_b8(op1, op2, vl); + return __riscv_vmsbc_vx_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m2_b4( @@ -948,7 +948,7 @@ vbool8_t test_vmsbc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl) { - return vmsbc_vvm_u8m2_b4(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u8m2_b4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m2_b4( @@ -957,7 +957,7 @@ vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl) { - return vmsbc_vxm_u8m2_b4(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u8m2_b4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m2_b4( @@ -966,7 +966,7 @@ vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsbc_vv_u8m2_b4(op1, op2, vl); + return __riscv_vmsbc_vv_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m2_b4( @@ -975,7 +975,7 @@ 
vbool4_t test_vmsbc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsbc_vx_u8m2_b4(op1, op2, vl); + return __riscv_vmsbc_vx_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m4_b2( @@ -984,7 +984,7 @@ vbool4_t test_vmsbc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl) { - return vmsbc_vvm_u8m4_b2(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u8m4_b2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m4_b2( @@ -993,7 +993,7 @@ vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl) { - return vmsbc_vxm_u8m4_b2(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u8m4_b2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m4_b2( @@ -1002,7 +1002,7 @@ vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsbc_vv_u8m4_b2(op1, op2, vl); + return __riscv_vmsbc_vv_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m4_b2( @@ -1011,7 +1011,7 @@ vbool2_t test_vmsbc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsbc_vx_u8m4_b2(op1, op2, vl); + return __riscv_vmsbc_vx_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m8_b1( @@ -1020,7 +1020,7 @@ vbool2_t test_vmsbc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, vbool1_t 
borrowin, size_t vl) { - return vmsbc_vvm_u8m8_b1(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u8m8_b1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m8_b1( @@ -1029,7 +1029,7 @@ vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl) { - return vmsbc_vxm_u8m8_b1(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u8m8_b1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m8_b1( @@ -1038,7 +1038,7 @@ vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsbc_vv_u8m8_b1(op1, op2, vl); + return __riscv_vmsbc_vv_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m8_b1( @@ -1047,7 +1047,7 @@ vbool1_t test_vmsbc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsbc_vx_u8m8_b1(op1, op2, vl); + return __riscv_vmsbc_vx_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf4_b64( @@ -1056,7 +1056,7 @@ vbool1_t test_vmsbc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vvm_u16mf4_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u16mf4_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf4_b64( @@ -1065,7 +1065,7 @@ vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vxm_u16mf4_b64(op1, op2, borrowin, vl); + return 
__riscv_vmsbc_vxm_u16mf4_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf4_b64( @@ -1074,7 +1074,7 @@ vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, vbool64_t bo // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsbc_vv_u16mf4_b64(op1, op2, vl); + return __riscv_vmsbc_vv_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf4_b64( @@ -1083,7 +1083,7 @@ vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsbc_vx_u16mf4_b64(op1, op2, vl); + return __riscv_vmsbc_vx_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf2_b32( @@ -1092,7 +1092,7 @@ vbool64_t test_vmsbc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vvm_u16mf2_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u16mf2_b32(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf2_b32( @@ -1101,7 +1101,7 @@ vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vxm_u16mf2_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u16mf2_b32(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf2_b32( @@ -1110,7 +1110,7 @@ vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, vbool32_t bo // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsbc_vv_u16mf2_b32(op1, op2, vl); + return __riscv_vmsbc_vv_u16mf2_b32(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf2_b32( @@ -1119,7 +1119,7 @@ vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsbc_vx_u16mf2_b32(op1, op2, vl); + return __riscv_vmsbc_vx_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m1_b16( @@ -1128,7 +1128,7 @@ vbool32_t test_vmsbc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vvm_u16m1_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u16m1_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m1_b16( @@ -1137,7 +1137,7 @@ vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, vbool16_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vxm_u16m1_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u16m1_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m1_b16( @@ -1146,7 +1146,7 @@ vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, vbool16_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsbc_vv_u16m1_b16(op1, op2, vl); + return __riscv_vmsbc_vv_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m1_b16( @@ -1155,7 +1155,7 @@ vbool16_t test_vmsbc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsbc_vx_u16m1_b16(op1, op2, vl); + return __riscv_vmsbc_vx_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m2_b8( @@ -1164,7 +1164,7 @@ vbool16_t 
test_vmsbc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vvm_u16m2_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u16m2_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m2_b8( @@ -1173,7 +1173,7 @@ vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, vbool8_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vxm_u16m2_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u16m2_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m2_b8( @@ -1182,7 +1182,7 @@ vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, vbool8_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsbc_vv_u16m2_b8(op1, op2, vl); + return __riscv_vmsbc_vv_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m2_b8( @@ -1191,7 +1191,7 @@ vbool8_t test_vmsbc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsbc_vx_u16m2_b8(op1, op2, vl); + return __riscv_vmsbc_vx_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m4_b4( @@ -1200,7 +1200,7 @@ vbool8_t test_vmsbc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl) { - return vmsbc_vvm_u16m4_b4(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u16m4_b4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m4_b4( @@ -1209,7 +1209,7 @@ vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t borr // CHECK-RV64-NEXT: ret 
[[TMP0]] // vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl) { - return vmsbc_vxm_u16m4_b4(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u16m4_b4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m4_b4( @@ -1218,7 +1218,7 @@ vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, vbool4_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsbc_vv_u16m4_b4(op1, op2, vl); + return __riscv_vmsbc_vv_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m4_b4( @@ -1227,7 +1227,7 @@ vbool4_t test_vmsbc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsbc_vx_u16m4_b4(op1, op2, vl); + return __riscv_vmsbc_vx_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m8_b2( @@ -1236,7 +1236,7 @@ vbool4_t test_vmsbc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl) { - return vmsbc_vvm_u16m8_b2(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u16m8_b2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m8_b2( @@ -1245,7 +1245,7 @@ vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, vbool2_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl) { - return vmsbc_vxm_u16m8_b2(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u16m8_b2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m8_b2( @@ -1254,7 +1254,7 @@ vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, vbool2_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - 
return vmsbc_vv_u16m8_b2(op1, op2, vl); + return __riscv_vmsbc_vv_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m8_b2( @@ -1263,7 +1263,7 @@ vbool2_t test_vmsbc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsbc_vx_u16m8_b2(op1, op2, vl); + return __riscv_vmsbc_vx_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32mf2_b64( @@ -1272,7 +1272,7 @@ vbool2_t test_vmsbc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vvm_u32mf2_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u32mf2_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32mf2_b64( @@ -1281,7 +1281,7 @@ vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vxm_u32mf2_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u32mf2_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u32mf2_b64( @@ -1290,7 +1290,7 @@ vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, vbool64_t bo // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsbc_vv_u32mf2_b64(op1, op2, vl); + return __riscv_vmsbc_vv_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u32mf2_b64( @@ -1299,7 +1299,7 @@ vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsbc_vx_u32mf2_b64(op1, op2, vl); + return __riscv_vmsbc_vx_u32mf2_b64(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m1_b32( @@ -1308,7 +1308,7 @@ vbool64_t test_vmsbc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vvm_u32m1_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u32m1_b32(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m1_b32( @@ -1317,7 +1317,7 @@ vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, vbool32_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vxm_u32m1_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u32m1_b32(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m1_b32( @@ -1326,7 +1326,7 @@ vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, vbool32_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsbc_vv_u32m1_b32(op1, op2, vl); + return __riscv_vmsbc_vv_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m1_b32( @@ -1335,7 +1335,7 @@ vbool32_t test_vmsbc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsbc_vx_u32m1_b32(op1, op2, vl); + return __riscv_vmsbc_vx_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m2_b16( @@ -1344,7 +1344,7 @@ vbool32_t test_vmsbc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vvm_u32m2_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u32m2_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m2_b16( @@ -1353,7 
+1353,7 @@ vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, vbool16_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vxm_u32m2_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u32m2_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m2_b16( @@ -1362,7 +1362,7 @@ vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, vbool16_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsbc_vv_u32m2_b16(op1, op2, vl); + return __riscv_vmsbc_vv_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m2_b16( @@ -1371,7 +1371,7 @@ vbool16_t test_vmsbc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsbc_vx_u32m2_b16(op1, op2, vl); + return __riscv_vmsbc_vx_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m4_b8( @@ -1380,7 +1380,7 @@ vbool16_t test_vmsbc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vvm_u32m4_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u32m4_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m4_b8( @@ -1389,7 +1389,7 @@ vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, vbool8_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vxm_u32m4_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u32m4_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m4_b8( @@ -1398,7 +1398,7 @@ vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, 
vbool8_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsbc_vv_u32m4_b8(op1, op2, vl); + return __riscv_vmsbc_vv_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m4_b8( @@ -1407,7 +1407,7 @@ vbool8_t test_vmsbc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsbc_vx_u32m4_b8(op1, op2, vl); + return __riscv_vmsbc_vx_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m8_b4( @@ -1416,7 +1416,7 @@ vbool8_t test_vmsbc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl) { - return vmsbc_vvm_u32m8_b4(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u32m8_b4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m8_b4( @@ -1425,7 +1425,7 @@ vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, vbool4_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl) { - return vmsbc_vxm_u32m8_b4(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u32m8_b4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m8_b4( @@ -1434,7 +1434,7 @@ vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, vbool4_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsbc_vv_u32m8_b4(op1, op2, vl); + return __riscv_vmsbc_vv_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m8_b4( @@ -1443,7 +1443,7 @@ vbool4_t test_vmsbc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return 
vmsbc_vx_u32m8_b4(op1, op2, vl); + return __riscv_vmsbc_vx_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m1_b64( @@ -1452,7 +1452,7 @@ vbool4_t test_vmsbc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vvm_u64m1_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u64m1_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m1_b64( @@ -1461,7 +1461,7 @@ vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, vbool64_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl) { - return vmsbc_vxm_u64m1_b64(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u64m1_b64(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m1_b64( @@ -1470,7 +1470,7 @@ vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, vbool64_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsbc_vv_u64m1_b64(op1, op2, vl); + return __riscv_vmsbc_vv_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m1_b64( @@ -1479,7 +1479,7 @@ vbool64_t test_vmsbc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsbc_vx_u64m1_b64(op1, op2, vl); + return __riscv_vmsbc_vx_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m2_b32( @@ -1488,7 +1488,7 @@ vbool64_t test_vmsbc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vvm_u64m2_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u64m2_b32(op1, 
op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m2_b32( @@ -1497,7 +1497,7 @@ vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, vbool32_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl) { - return vmsbc_vxm_u64m2_b32(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u64m2_b32(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m2_b32( @@ -1506,7 +1506,7 @@ vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, vbool32_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsbc_vv_u64m2_b32(op1, op2, vl); + return __riscv_vmsbc_vv_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m2_b32( @@ -1515,7 +1515,7 @@ vbool32_t test_vmsbc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsbc_vx_u64m2_b32(op1, op2, vl); + return __riscv_vmsbc_vx_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m4_b16( @@ -1524,7 +1524,7 @@ vbool32_t test_vmsbc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vvm_u64m4_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u64m4_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m4_b16( @@ -1533,7 +1533,7 @@ vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, vbool16_t b // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl) { - return vmsbc_vxm_u64m4_b16(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u64m4_b16(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: 
@test_vmsbc_vv_u64m4_b16( @@ -1542,7 +1542,7 @@ vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, vbool16_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsbc_vv_u64m4_b16(op1, op2, vl); + return __riscv_vmsbc_vv_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m4_b16( @@ -1551,7 +1551,7 @@ vbool16_t test_vmsbc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsbc_vx_u64m4_b16(op1, op2, vl); + return __riscv_vmsbc_vx_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m8_b8( @@ -1560,7 +1560,7 @@ vbool16_t test_vmsbc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vvm_u64m8_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vvm_u64m8_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m8_b8( @@ -1569,7 +1569,7 @@ vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl) { - return vmsbc_vxm_u64m8_b8(op1, op2, borrowin, vl); + return __riscv_vmsbc_vxm_u64m8_b8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m8_b8( @@ -1578,7 +1578,7 @@ vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, vbool8_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsbc_vv_u64m8_b8(op1, op2, vl); + return __riscv_vmsbc_vv_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m8_b8( @@ -1587,6 +1587,6 @@ vbool8_t test_vmsbc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t 
vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsbc_vx_u64m8_b8(op1, op2, vl); + return __riscv_vmsbc_vx_u64m8_b8(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsbf.c index 5ac58aedc405..db767a781bab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsbf.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsbf.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) { - return vmsbf_m_b1(op1, vl); + return __riscv_vmsbf_m_b1(op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) { - return vmsbf_m_b2(op1, vl); + return __riscv_vmsbf_m_b2(op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) { - return vmsbf_m_b4(op1, vl); + return __riscv_vmsbf_m_b4(op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) { - return vmsbf_m_b8(op1, vl); + return __riscv_vmsbf_m_b8(op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b16( @@ -48,7 +48,7 @@ vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) { - return vmsbf_m_b16(op1, vl); + return __riscv_vmsbf_m_b16(op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b32( @@ -57,7 +57,7 @@ vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) { - return vmsbf_m_b32(op1, vl); + return __riscv_vmsbf_m_b32(op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b64( @@ -66,7 +66,7 @@ vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) { - return vmsbf_m_b64(op1, vl); + return __riscv_vmsbf_m_b64(op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b1_m( @@ -75,7 +75,7 @@ vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return vmsbf_m_b1_m(mask, op1, vl); + return __riscv_vmsbf_m_b1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b2_m( @@ -84,7 +84,7 @@ vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return vmsbf_m_b2_m(mask, op1, vl); + return __riscv_vmsbf_m_b2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b4_m( @@ -93,7 +93,7 @@ vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return vmsbf_m_b4_m(mask, op1, vl); + return __riscv_vmsbf_m_b4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b8_m( @@ -102,7 +102,7 @@ vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return vmsbf_m_b8_m(mask, op1, vl); + return __riscv_vmsbf_m_b8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b16_m( @@ -111,7 +111,7 @@ vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return vmsbf_m_b16_m(mask, op1, vl); + return 
__riscv_vmsbf_m_b16_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b32_m( @@ -120,7 +120,7 @@ vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return vmsbf_m_b32_m(mask, op1, vl); + return __riscv_vmsbf_m_b32_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b64_m( @@ -129,6 +129,6 @@ vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return vmsbf_m_b64_m(mask, op1, vl); + return __riscv_vmsbf_m_b64_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmseq.c index d1824ff0a570..783a046c9435 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmseq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmseq.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmseq_vv_i8mf8_b64(op1, op2, vl); + return __riscv_vmseq_vv_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmseq_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8mf8_b64(op1, op2, vl); + return __riscv_vmseq_vx_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmseq_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmseq_vv_i8mf4_b32(op1, op2, 
vl); + return __riscv_vmseq_vv_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmseq_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8mf4_b32(op1, op2, vl); + return __riscv_vmseq_vx_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmseq_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmseq_vv_i8mf2_b16(op1, op2, vl); + return __riscv_vmseq_vv_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmseq_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8mf2_b16(op1, op2, vl); + return __riscv_vmseq_vx_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmseq_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmseq_vv_i8m1_b8(op1, op2, vl); + return __riscv_vmseq_vv_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmseq_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m1_b8(op1, op2, vl); + return __riscv_vmseq_vx_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmseq_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i8m2_b4(vint8m2_t 
op1, vint8m2_t op2, size_t vl) { - return vmseq_vv_i8m2_b4(op1, op2, vl); + return __riscv_vmseq_vv_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmseq_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m2_b4(op1, op2, vl); + return __riscv_vmseq_vx_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmseq_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmseq_vv_i8m4_b2(op1, op2, vl); + return __riscv_vmseq_vv_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmseq_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m4_b2(op1, op2, vl); + return __riscv_vmseq_vx_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1( @@ -121,7 +121,7 @@ vbool2_t test_vmseq_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmseq_vv_i8m8_b1(op1, op2, vl); + return __riscv_vmseq_vv_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1( @@ -130,7 +130,7 @@ vbool1_t test_vmseq_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m8_b1(op1, op2, vl); + return __riscv_vmseq_vx_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64( @@ -139,7 +139,7 @@ vbool1_t test_vmseq_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t 
test_vmseq_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmseq_vv_i16mf4_b64(op1, op2, vl); + return __riscv_vmseq_vv_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64( @@ -148,7 +148,7 @@ vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16mf4_b64(op1, op2, vl); + return __riscv_vmseq_vx_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32( @@ -157,7 +157,7 @@ vbool64_t test_vmseq_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmseq_vv_i16mf2_b32(op1, op2, vl); + return __riscv_vmseq_vv_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32( @@ -166,7 +166,7 @@ vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16mf2_b32(op1, op2, vl); + return __riscv_vmseq_vx_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16( @@ -175,7 +175,7 @@ vbool32_t test_vmseq_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmseq_vv_i16m1_b16(op1, op2, vl); + return __riscv_vmseq_vv_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16( @@ -184,7 +184,7 @@ vbool16_t test_vmseq_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m1_b16(op1, op2, vl); + return __riscv_vmseq_vx_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmseq_vv_i16m2_b8( @@ -193,7 +193,7 @@ vbool16_t test_vmseq_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmseq_vv_i16m2_b8(op1, op2, vl); + return __riscv_vmseq_vv_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8( @@ -202,7 +202,7 @@ vbool8_t test_vmseq_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m2_b8(op1, op2, vl); + return __riscv_vmseq_vx_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4( @@ -211,7 +211,7 @@ vbool8_t test_vmseq_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmseq_vv_i16m4_b4(op1, op2, vl); + return __riscv_vmseq_vv_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4( @@ -220,7 +220,7 @@ vbool4_t test_vmseq_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m4_b4(op1, op2, vl); + return __riscv_vmseq_vx_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2( @@ -229,7 +229,7 @@ vbool4_t test_vmseq_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmseq_vv_i16m8_b2(op1, op2, vl); + return __riscv_vmseq_vv_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2( @@ -238,7 +238,7 @@ vbool2_t test_vmseq_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return 
vmseq_vx_i16m8_b2(op1, op2, vl); + return __riscv_vmseq_vx_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64( @@ -247,7 +247,7 @@ vbool2_t test_vmseq_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmseq_vv_i32mf2_b64(op1, op2, vl); + return __riscv_vmseq_vv_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64( @@ -256,7 +256,7 @@ vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32mf2_b64(op1, op2, vl); + return __riscv_vmseq_vx_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32( @@ -265,7 +265,7 @@ vbool64_t test_vmseq_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmseq_vv_i32m1_b32(op1, op2, vl); + return __riscv_vmseq_vv_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32( @@ -274,7 +274,7 @@ vbool32_t test_vmseq_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m1_b32(op1, op2, vl); + return __riscv_vmseq_vx_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16( @@ -283,7 +283,7 @@ vbool32_t test_vmseq_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmseq_vv_i32m2_b16(op1, op2, vl); + return __riscv_vmseq_vv_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16( @@ -292,7 +292,7 @@ vbool16_t test_vmseq_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m2_b16(op1, op2, vl); + return __riscv_vmseq_vx_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8( @@ -301,7 +301,7 @@ vbool16_t test_vmseq_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmseq_vv_i32m4_b8(op1, op2, vl); + return __riscv_vmseq_vv_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8( @@ -310,7 +310,7 @@ vbool8_t test_vmseq_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m4_b8(op1, op2, vl); + return __riscv_vmseq_vx_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4( @@ -319,7 +319,7 @@ vbool8_t test_vmseq_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmseq_vv_i32m8_b4(op1, op2, vl); + return __riscv_vmseq_vv_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4( @@ -328,7 +328,7 @@ vbool4_t test_vmseq_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m8_b4(op1, op2, vl); + return __riscv_vmseq_vx_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64( @@ -337,7 +337,7 @@ vbool4_t test_vmseq_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmseq_vv_i64m1_b64(op1, op2, vl); + return __riscv_vmseq_vv_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmseq_vx_i64m1_b64( @@ -346,7 +346,7 @@ vbool64_t test_vmseq_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m1_b64(op1, op2, vl); + return __riscv_vmseq_vx_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32( @@ -355,7 +355,7 @@ vbool64_t test_vmseq_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmseq_vv_i64m2_b32(op1, op2, vl); + return __riscv_vmseq_vv_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32( @@ -364,7 +364,7 @@ vbool32_t test_vmseq_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m2_b32(op1, op2, vl); + return __riscv_vmseq_vx_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16( @@ -373,7 +373,7 @@ vbool32_t test_vmseq_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmseq_vv_i64m4_b16(op1, op2, vl); + return __riscv_vmseq_vv_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16( @@ -382,7 +382,7 @@ vbool16_t test_vmseq_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m4_b16(op1, op2, vl); + return __riscv_vmseq_vx_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8( @@ -391,7 +391,7 @@ vbool16_t test_vmseq_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t 
vl) { - return vmseq_vv_i64m8_b8(op1, op2, vl); + return __riscv_vmseq_vv_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8( @@ -400,7 +400,7 @@ vbool8_t test_vmseq_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m8_b8(op1, op2, vl); + return __riscv_vmseq_vx_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64( @@ -409,7 +409,7 @@ vbool8_t test_vmseq_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmseq_vv_u8mf8_b64(op1, op2, vl); + return __riscv_vmseq_vv_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64( @@ -418,7 +418,7 @@ vbool64_t test_vmseq_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8mf8_b64(op1, op2, vl); + return __riscv_vmseq_vx_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32( @@ -427,7 +427,7 @@ vbool64_t test_vmseq_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmseq_vv_u8mf4_b32(op1, op2, vl); + return __riscv_vmseq_vv_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32( @@ -436,7 +436,7 @@ vbool32_t test_vmseq_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8mf4_b32(op1, op2, vl); + return __riscv_vmseq_vx_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16( @@ -445,7 +445,7 @@ vbool32_t test_vmseq_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t 
op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmseq_vv_u8mf2_b16(op1, op2, vl); + return __riscv_vmseq_vv_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16( @@ -454,7 +454,7 @@ vbool16_t test_vmseq_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8mf2_b16(op1, op2, vl); + return __riscv_vmseq_vx_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8( @@ -463,7 +463,7 @@ vbool16_t test_vmseq_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmseq_vv_u8m1_b8(op1, op2, vl); + return __riscv_vmseq_vv_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8( @@ -472,7 +472,7 @@ vbool8_t test_vmseq_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m1_b8(op1, op2, vl); + return __riscv_vmseq_vx_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4( @@ -481,7 +481,7 @@ vbool8_t test_vmseq_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmseq_vv_u8m2_b4(op1, op2, vl); + return __riscv_vmseq_vv_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4( @@ -490,7 +490,7 @@ vbool4_t test_vmseq_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m2_b4(op1, op2, vl); + return __riscv_vmseq_vx_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmseq_vv_u8m4_b2( @@ -499,7 +499,7 @@ vbool4_t test_vmseq_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmseq_vv_u8m4_b2(op1, op2, vl); + return __riscv_vmseq_vv_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2( @@ -508,7 +508,7 @@ vbool2_t test_vmseq_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m4_b2(op1, op2, vl); + return __riscv_vmseq_vx_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1( @@ -517,7 +517,7 @@ vbool2_t test_vmseq_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmseq_vv_u8m8_b1(op1, op2, vl); + return __riscv_vmseq_vv_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1( @@ -526,7 +526,7 @@ vbool1_t test_vmseq_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m8_b1(op1, op2, vl); + return __riscv_vmseq_vx_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64( @@ -535,7 +535,7 @@ vbool1_t test_vmseq_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmseq_vv_u16mf4_b64(op1, op2, vl); + return __riscv_vmseq_vv_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64( @@ -544,7 +544,7 @@ vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return 
vmseq_vx_u16mf4_b64(op1, op2, vl); + return __riscv_vmseq_vx_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32( @@ -553,7 +553,7 @@ vbool64_t test_vmseq_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmseq_vv_u16mf2_b32(op1, op2, vl); + return __riscv_vmseq_vv_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32( @@ -562,7 +562,7 @@ vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16mf2_b32(op1, op2, vl); + return __riscv_vmseq_vx_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16( @@ -571,7 +571,7 @@ vbool32_t test_vmseq_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmseq_vv_u16m1_b16(op1, op2, vl); + return __riscv_vmseq_vv_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16( @@ -580,7 +580,7 @@ vbool16_t test_vmseq_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m1_b16(op1, op2, vl); + return __riscv_vmseq_vx_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8( @@ -589,7 +589,7 @@ vbool16_t test_vmseq_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmseq_vv_u16m2_b8(op1, op2, vl); + return __riscv_vmseq_vv_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8( @@ -598,7 +598,7 @@ vbool8_t test_vmseq_vv_u16m2_b8(vuint16m2_t 
op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m2_b8(op1, op2, vl); + return __riscv_vmseq_vx_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4( @@ -607,7 +607,7 @@ vbool8_t test_vmseq_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmseq_vv_u16m4_b4(op1, op2, vl); + return __riscv_vmseq_vv_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4( @@ -616,7 +616,7 @@ vbool4_t test_vmseq_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m4_b4(op1, op2, vl); + return __riscv_vmseq_vx_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2( @@ -625,7 +625,7 @@ vbool4_t test_vmseq_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmseq_vv_u16m8_b2(op1, op2, vl); + return __riscv_vmseq_vv_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2( @@ -634,7 +634,7 @@ vbool2_t test_vmseq_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m8_b2(op1, op2, vl); + return __riscv_vmseq_vx_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64( @@ -643,7 +643,7 @@ vbool2_t test_vmseq_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmseq_vv_u32mf2_b64(op1, op2, vl); + return __riscv_vmseq_vv_u32mf2_b64(op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64( @@ -652,7 +652,7 @@ vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32mf2_b64(op1, op2, vl); + return __riscv_vmseq_vx_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32( @@ -661,7 +661,7 @@ vbool64_t test_vmseq_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmseq_vv_u32m1_b32(op1, op2, vl); + return __riscv_vmseq_vv_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32( @@ -670,7 +670,7 @@ vbool32_t test_vmseq_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m1_b32(op1, op2, vl); + return __riscv_vmseq_vx_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16( @@ -679,7 +679,7 @@ vbool32_t test_vmseq_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmseq_vv_u32m2_b16(op1, op2, vl); + return __riscv_vmseq_vv_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16( @@ -688,7 +688,7 @@ vbool16_t test_vmseq_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m2_b16(op1, op2, vl); + return __riscv_vmseq_vx_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8( @@ -697,7 +697,7 @@ vbool16_t test_vmseq_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t 
test_vmseq_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmseq_vv_u32m4_b8(op1, op2, vl); + return __riscv_vmseq_vv_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8( @@ -706,7 +706,7 @@ vbool8_t test_vmseq_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m4_b8(op1, op2, vl); + return __riscv_vmseq_vx_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4( @@ -715,7 +715,7 @@ vbool8_t test_vmseq_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmseq_vv_u32m8_b4(op1, op2, vl); + return __riscv_vmseq_vv_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4( @@ -724,7 +724,7 @@ vbool4_t test_vmseq_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m8_b4(op1, op2, vl); + return __riscv_vmseq_vx_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64( @@ -733,7 +733,7 @@ vbool4_t test_vmseq_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmseq_vv_u64m1_b64(op1, op2, vl); + return __riscv_vmseq_vv_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64( @@ -742,7 +742,7 @@ vbool64_t test_vmseq_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m1_b64(op1, op2, vl); + return __riscv_vmseq_vx_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32( @@ -751,7 +751,7 @@ 
vbool64_t test_vmseq_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmseq_vv_u64m2_b32(op1, op2, vl); + return __riscv_vmseq_vv_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32( @@ -760,7 +760,7 @@ vbool32_t test_vmseq_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m2_b32(op1, op2, vl); + return __riscv_vmseq_vx_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16( @@ -769,7 +769,7 @@ vbool32_t test_vmseq_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmseq_vv_u64m4_b16(op1, op2, vl); + return __riscv_vmseq_vv_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16( @@ -778,7 +778,7 @@ vbool16_t test_vmseq_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m4_b16(op1, op2, vl); + return __riscv_vmseq_vx_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8( @@ -787,7 +787,7 @@ vbool16_t test_vmseq_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmseq_vv_u64m8_b8(op1, op2, vl); + return __riscv_vmseq_vv_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8( @@ -796,7 +796,7 @@ vbool8_t test_vmseq_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return 
vmseq_vx_u64m8_b8(op1, op2, vl); + return __riscv_vmseq_vx_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64_m( @@ -805,7 +805,7 @@ vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmseq_vv_i8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64_m( @@ -814,7 +814,7 @@ vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32_m( @@ -823,7 +823,7 @@ vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmseq_vv_i8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32_m( @@ -832,7 +832,7 @@ vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16_m( @@ -841,7 +841,7 @@ vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmseq_vv_i8mf2_b16_m(mask, op1, op2, vl); + return 
__riscv_vmseq_vv_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16_m( @@ -850,7 +850,7 @@ vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8_m( @@ -859,7 +859,7 @@ vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmseq_vv_i8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8_m( @@ -868,7 +868,7 @@ vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4_m( @@ -877,7 +877,7 @@ vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmseq_vv_i8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4_m( @@ -886,7 +886,7 @@ vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmseq_vv_i8m4_b2_m( @@ -895,7 +895,7 @@ vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmseq_vv_i8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2_m( @@ -904,7 +904,7 @@ vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1_m( @@ -913,7 +913,7 @@ vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmseq_vv_i8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1_m( @@ -922,7 +922,7 @@ vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64_m( @@ -931,7 +931,7 @@ vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmseq_vv_i16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64_m( @@ -940,7 +940,7 @@ vbool64_t 
test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32_m( @@ -949,7 +949,7 @@ vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmseq_vv_i16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32_m( @@ -958,7 +958,7 @@ vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16_m( @@ -967,7 +967,7 @@ vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmseq_vv_i16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16_m( @@ -976,7 +976,7 @@ vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8_m( @@ -985,7 +985,7 @@ vbool16_t 
test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmseq_vv_i16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8_m( @@ -994,7 +994,7 @@ vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4_m( @@ -1003,7 +1003,7 @@ vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmseq_vv_i16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4_m( @@ -1012,7 +1012,7 @@ vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2_m( @@ -1021,7 +1021,7 @@ vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmseq_vv_i16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2_m( @@ -1030,7 +1030,7 @@ vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t 
op1, vint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_m( @@ -1039,7 +1039,7 @@ vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmseq_vv_i32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_m( @@ -1048,7 +1048,7 @@ vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32_m( @@ -1057,7 +1057,7 @@ vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmseq_vv_i32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32_m( @@ -1066,7 +1066,7 @@ vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16_m( @@ -1075,7 +1075,7 @@ vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t 
op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmseq_vv_i32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16_m( @@ -1084,7 +1084,7 @@ vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8_m( @@ -1093,7 +1093,7 @@ vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmseq_vv_i32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8_m( @@ -1102,7 +1102,7 @@ vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4_m( @@ -1111,7 +1111,7 @@ vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmseq_vv_i32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4_m( @@ -1120,7 +1120,7 @@ vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] 
// vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64_m( @@ -1129,7 +1129,7 @@ vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmseq_vv_i64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64_m( @@ -1138,7 +1138,7 @@ vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32_m( @@ -1147,7 +1147,7 @@ vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmseq_vv_i64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32_m( @@ -1156,7 +1156,7 @@ vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16_m( @@ -1165,7 +1165,7 @@ vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t 
test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmseq_vv_i64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16_m( @@ -1174,7 +1174,7 @@ vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8_m( @@ -1183,7 +1183,7 @@ vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmseq_vv_i64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8_m( @@ -1192,7 +1192,7 @@ vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64_m( @@ -1201,7 +1201,7 @@ vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmseq_vv_u8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64_m( @@ -1210,7 +1210,7 @@ vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t 
test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32_m( @@ -1219,7 +1219,7 @@ vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmseq_vv_u8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32_m( @@ -1228,7 +1228,7 @@ vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16_m( @@ -1237,7 +1237,7 @@ vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmseq_vv_u8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16_m( @@ -1246,7 +1246,7 @@ vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8_m( @@ -1255,7 +1255,7 @@ vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t 
test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmseq_vv_u8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8_m( @@ -1264,7 +1264,7 @@ vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4_m( @@ -1273,7 +1273,7 @@ vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmseq_vv_u8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4_m( @@ -1282,7 +1282,7 @@ vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2_m( @@ -1291,7 +1291,7 @@ vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmseq_vv_u8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2_m( @@ -1300,7 +1300,7 @@ vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, 
size_t vl) { - return vmseq_vx_u8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1_m( @@ -1309,7 +1309,7 @@ vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmseq_vv_u8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1_m( @@ -1318,7 +1318,7 @@ vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64_m( @@ -1327,7 +1327,7 @@ vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmseq_vv_u16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64_m( @@ -1336,7 +1336,7 @@ vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32_m( @@ -1345,7 +1345,7 @@ vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return 
vmseq_vv_u16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32_m( @@ -1354,7 +1354,7 @@ vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16_m( @@ -1363,7 +1363,7 @@ vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmseq_vv_u16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16_m( @@ -1372,7 +1372,7 @@ vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8_m( @@ -1381,7 +1381,7 @@ vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmseq_vv_u16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8_m( @@ -1390,7 +1390,7 @@ vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return 
vmseq_vx_u16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4_m( @@ -1399,7 +1399,7 @@ vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmseq_vv_u16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4_m( @@ -1408,7 +1408,7 @@ vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2_m( @@ -1417,7 +1417,7 @@ vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmseq_vv_u16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2_m( @@ -1426,7 +1426,7 @@ vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_m( @@ -1435,7 +1435,7 @@ vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmseq_vv_u32mf2_b64_m(mask, op1, 
op2, vl); + return __riscv_vmseq_vv_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_m( @@ -1444,7 +1444,7 @@ vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32_m( @@ -1453,7 +1453,7 @@ vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmseq_vv_u32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32_m( @@ -1462,7 +1462,7 @@ vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16_m( @@ -1471,7 +1471,7 @@ vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmseq_vv_u32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16_m( @@ -1480,7 +1480,7 @@ vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m2_b16_m(mask, op1, op2, 
vl); + return __riscv_vmseq_vx_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8_m( @@ -1489,7 +1489,7 @@ vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmseq_vv_u32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8_m( @@ -1498,7 +1498,7 @@ vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4_m( @@ -1507,7 +1507,7 @@ vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmseq_vv_u32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4_m( @@ -1516,7 +1516,7 @@ vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64_m( @@ -1525,7 +1525,7 @@ vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmseq_vv_u64m1_b64_m(mask, op1, op2, vl); + return 
__riscv_vmseq_vv_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64_m( @@ -1534,7 +1534,7 @@ vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32_m( @@ -1543,7 +1543,7 @@ vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmseq_vv_u64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32_m( @@ -1552,7 +1552,7 @@ vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16_m( @@ -1561,7 +1561,7 @@ vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmseq_vv_u64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16_m( @@ -1570,7 +1570,7 @@ vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m4_b16_m(mask, op1, op2, vl); + return 
__riscv_vmseq_vx_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8_m( @@ -1579,7 +1579,7 @@ vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmseq_vv_u64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vv_u64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8_m( @@ -1588,6 +1588,6 @@ vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmseq_vx_u64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmset.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmset.c index 4c2d141616e7..e90daaea1860 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmset.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmset.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmset_m_b1(size_t vl) { - return vmset_m_b1(vl); + return __riscv_vmset_m_b1(vl); } // CHECK-RV64-LABEL: @test_vmset_m_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmset_m_b1(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmset_m_b2(size_t vl) { - return vmset_m_b2(vl); + return __riscv_vmset_m_b2(vl); } // CHECK-RV64-LABEL: @test_vmset_m_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmset_m_b2(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmset_m_b4(size_t vl) { - return vmset_m_b4(vl); + return __riscv_vmset_m_b4(vl); } // CHECK-RV64-LABEL: @test_vmset_m_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmset_m_b4(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool8_t test_vmset_m_b8(size_t vl) { - return vmset_m_b8(vl); + return __riscv_vmset_m_b8(vl); } // CHECK-RV64-LABEL: @test_vmset_m_b16( @@ -48,7 +48,7 @@ vbool8_t test_vmset_m_b8(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmset_m_b16(size_t vl) { - return vmset_m_b16(vl); + return __riscv_vmset_m_b16(vl); } // CHECK-RV64-LABEL: @test_vmset_m_b32( @@ -57,7 +57,7 @@ vbool16_t test_vmset_m_b16(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmset_m_b32(size_t vl) { - return vmset_m_b32(vl); + return __riscv_vmset_m_b32(vl); } // CHECK-RV64-LABEL: @test_vmset_m_b64( @@ -66,6 +66,6 @@ vbool32_t test_vmset_m_b32(size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmset_m_b64(size_t vl) { - return vmset_m_b64(vl); + return __riscv_vmset_m_b64(vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsge.c index 9e5235e3aa9f..1e6001027d4b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsge.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsge_vv_i8mf8_b64(op1, op2, vl); + return __riscv_vmsge_vv_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf8_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmsge_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8mf8_b64(op1, op2, vl); + return __riscv_vmsge_vx_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf4_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmsge_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t 
test_vmsge_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsge_vv_i8mf4_b32(op1, op2, vl); + return __riscv_vmsge_vv_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf4_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmsge_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8mf4_b32(op1, op2, vl); + return __riscv_vmsge_vx_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf2_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmsge_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsge_vv_i8mf2_b16(op1, op2, vl); + return __riscv_vmsge_vv_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf2_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmsge_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8mf2_b16(op1, op2, vl); + return __riscv_vmsge_vx_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m1_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmsge_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsge_vv_i8m1_b8(op1, op2, vl); + return __riscv_vmsge_vv_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m1_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmsge_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m1_b8(op1, op2, vl); + return __riscv_vmsge_vx_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m2_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmsge_vx_i8m1_b8(vint8m1_t 
op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsge_vv_i8m2_b4(op1, op2, vl); + return __riscv_vmsge_vv_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m2_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmsge_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m2_b4(op1, op2, vl); + return __riscv_vmsge_vx_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m4_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmsge_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsge_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsge_vv_i8m4_b2(op1, op2, vl); + return __riscv_vmsge_vv_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m4_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmsge_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsge_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m4_b2(op1, op2, vl); + return __riscv_vmsge_vx_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m8_b1( @@ -121,7 +121,7 @@ vbool2_t test_vmsge_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsge_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsge_vv_i8m8_b1(op1, op2, vl); + return __riscv_vmsge_vv_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m8_b1( @@ -130,7 +130,7 @@ vbool1_t test_vmsge_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsge_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m8_b1(op1, op2, vl); + return __riscv_vmsge_vx_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf4_b64( @@ -139,7 +139,7 @@ vbool1_t 
test_vmsge_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsge_vv_i16mf4_b64(op1, op2, vl); + return __riscv_vmsge_vv_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf4_b64( @@ -148,7 +148,7 @@ vbool64_t test_vmsge_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16mf4_b64(op1, op2, vl); + return __riscv_vmsge_vx_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf2_b32( @@ -157,7 +157,7 @@ vbool64_t test_vmsge_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsge_vv_i16mf2_b32(op1, op2, vl); + return __riscv_vmsge_vv_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf2_b32( @@ -166,7 +166,7 @@ vbool32_t test_vmsge_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16mf2_b32(op1, op2, vl); + return __riscv_vmsge_vx_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m1_b16( @@ -175,7 +175,7 @@ vbool32_t test_vmsge_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsge_vv_i16m1_b16(op1, op2, vl); + return __riscv_vmsge_vv_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m1_b16( @@ -184,7 +184,7 @@ vbool16_t test_vmsge_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return 
vmsge_vx_i16m1_b16(op1, op2, vl); + return __riscv_vmsge_vx_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m2_b8( @@ -193,7 +193,7 @@ vbool16_t test_vmsge_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsge_vv_i16m2_b8(op1, op2, vl); + return __riscv_vmsge_vv_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m2_b8( @@ -202,7 +202,7 @@ vbool8_t test_vmsge_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m2_b8(op1, op2, vl); + return __riscv_vmsge_vx_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m4_b4( @@ -211,7 +211,7 @@ vbool8_t test_vmsge_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsge_vv_i16m4_b4(op1, op2, vl); + return __riscv_vmsge_vv_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m4_b4( @@ -220,7 +220,7 @@ vbool4_t test_vmsge_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m4_b4(op1, op2, vl); + return __riscv_vmsge_vx_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m8_b2( @@ -229,7 +229,7 @@ vbool4_t test_vmsge_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsge_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsge_vv_i16m8_b2(op1, op2, vl); + return __riscv_vmsge_vv_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m8_b2( @@ -238,7 +238,7 @@ vbool2_t test_vmsge_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] 
// vbool2_t test_vmsge_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m8_b2(op1, op2, vl); + return __riscv_vmsge_vx_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64( @@ -247,7 +247,7 @@ vbool2_t test_vmsge_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsge_vv_i32mf2_b64(op1, op2, vl); + return __riscv_vmsge_vv_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64( @@ -256,7 +256,7 @@ vbool64_t test_vmsge_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32mf2_b64(op1, op2, vl); + return __riscv_vmsge_vx_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m1_b32( @@ -265,7 +265,7 @@ vbool64_t test_vmsge_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsge_vv_i32m1_b32(op1, op2, vl); + return __riscv_vmsge_vv_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m1_b32( @@ -274,7 +274,7 @@ vbool32_t test_vmsge_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m1_b32(op1, op2, vl); + return __riscv_vmsge_vx_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m2_b16( @@ -283,7 +283,7 @@ vbool32_t test_vmsge_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsge_vv_i32m2_b16(op1, op2, vl); + return __riscv_vmsge_vv_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m2_b16( 
@@ -292,7 +292,7 @@ vbool16_t test_vmsge_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m2_b16(op1, op2, vl); + return __riscv_vmsge_vx_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m4_b8( @@ -301,7 +301,7 @@ vbool16_t test_vmsge_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsge_vv_i32m4_b8(op1, op2, vl); + return __riscv_vmsge_vv_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m4_b8( @@ -310,7 +310,7 @@ vbool8_t test_vmsge_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m4_b8(op1, op2, vl); + return __riscv_vmsge_vx_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m8_b4( @@ -319,7 +319,7 @@ vbool8_t test_vmsge_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsge_vv_i32m8_b4(op1, op2, vl); + return __riscv_vmsge_vv_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m8_b4( @@ -328,7 +328,7 @@ vbool4_t test_vmsge_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m8_b4(op1, op2, vl); + return __riscv_vmsge_vx_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m1_b64( @@ -337,7 +337,7 @@ vbool4_t test_vmsge_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsge_vv_i64m1_b64(op1, op2, vl); + 
return __riscv_vmsge_vv_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m1_b64( @@ -346,7 +346,7 @@ vbool64_t test_vmsge_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m1_b64(op1, op2, vl); + return __riscv_vmsge_vx_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m2_b32( @@ -355,7 +355,7 @@ vbool64_t test_vmsge_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsge_vv_i64m2_b32(op1, op2, vl); + return __riscv_vmsge_vv_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m2_b32( @@ -364,7 +364,7 @@ vbool32_t test_vmsge_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m2_b32(op1, op2, vl); + return __riscv_vmsge_vx_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m4_b16( @@ -373,7 +373,7 @@ vbool32_t test_vmsge_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsge_vv_i64m4_b16(op1, op2, vl); + return __riscv_vmsge_vv_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m4_b16( @@ -382,7 +382,7 @@ vbool16_t test_vmsge_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m4_b16(op1, op2, vl); + return __riscv_vmsge_vx_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m8_b8( @@ -391,7 +391,7 @@ vbool16_t test_vmsge_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool8_t test_vmsge_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsge_vv_i64m8_b8(op1, op2, vl); + return __riscv_vmsge_vv_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m8_b8( @@ -400,7 +400,7 @@ vbool8_t test_vmsge_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m8_b8(op1, op2, vl); + return __riscv_vmsge_vx_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf8_b64_m( @@ -409,7 +409,7 @@ vbool8_t test_vmsge_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsge_vv_i8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf8_b64_m( @@ -418,7 +418,7 @@ vbool64_t test_vmsge_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf4_b32_m( @@ -427,7 +427,7 @@ vbool64_t test_vmsge_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsge_vv_i8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf4_b32_m( @@ -436,7 +436,7 @@ vbool32_t test_vmsge_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return 
vmsge_vx_i8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf2_b16_m( @@ -445,7 +445,7 @@ vbool32_t test_vmsge_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsge_vv_i8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf2_b16_m( @@ -454,7 +454,7 @@ vbool16_t test_vmsge_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m1_b8_m( @@ -463,7 +463,7 @@ vbool16_t test_vmsge_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsge_vv_i8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m1_b8_m( @@ -472,7 +472,7 @@ vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m2_b4_m( @@ -481,7 +481,7 @@ vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsge_vv_i8m2_b4_m(mask, op1, op2, vl); + return 
__riscv_vmsge_vv_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m2_b4_m( @@ -490,7 +490,7 @@ vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m4_b2_m( @@ -499,7 +499,7 @@ vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsge_vv_i8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m4_b2_m( @@ -508,7 +508,7 @@ vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m8_b1_m( @@ -517,7 +517,7 @@ vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsge_vv_i8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m8_b1_m( @@ -526,7 +526,7 @@ vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsge_vv_i16mf4_b64_m( @@ -535,7 +535,7 @@ vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsge_vv_i16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf4_b64_m( @@ -544,7 +544,7 @@ vbool64_t test_vmsge_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf2_b32_m( @@ -553,7 +553,7 @@ vbool64_t test_vmsge_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsge_vv_i16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf2_b32_m( @@ -562,7 +562,7 @@ vbool32_t test_vmsge_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m1_b16_m( @@ -571,7 +571,7 @@ vbool32_t test_vmsge_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsge_vv_i16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsge_vx_i16m1_b16_m( @@ -580,7 +580,7 @@ vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m2_b8_m( @@ -589,7 +589,7 @@ vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsge_vv_i16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m2_b8_m( @@ -598,7 +598,7 @@ vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m4_b4_m( @@ -607,7 +607,7 @@ vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsge_vv_i16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m4_b4_m( @@ -616,7 +616,7 @@ vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m8_b2_m( @@ -625,7 +625,7 @@ vbool4_t 
test_vmsge_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsge_vv_i16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m8_b2_m( @@ -634,7 +634,7 @@ vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_m( @@ -643,7 +643,7 @@ vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsge_vv_i32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_m( @@ -652,7 +652,7 @@ vbool64_t test_vmsge_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m1_b32_m( @@ -661,7 +661,7 @@ vbool64_t test_vmsge_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsge_vv_i32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m1_b32_m( @@ -670,7 +670,7 @@ vbool32_t 
test_vmsge_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m2_b16_m( @@ -679,7 +679,7 @@ vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsge_vv_i32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m2_b16_m( @@ -688,7 +688,7 @@ vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m4_b8_m( @@ -697,7 +697,7 @@ vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsge_vv_i32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m4_b8_m( @@ -706,7 +706,7 @@ vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m8_b4_m( @@ -715,7 +715,7 @@ vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t mask, 
vint32m4_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsge_vv_i32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m8_b4_m( @@ -724,7 +724,7 @@ vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m1_b64_m( @@ -733,7 +733,7 @@ vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsge_vv_i64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m1_b64_m( @@ -742,7 +742,7 @@ vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m2_b32_m( @@ -751,7 +751,7 @@ vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsge_vv_i64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m2_b32_m( @@ -760,7 +760,7 @@ vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t o // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m4_b16_m( @@ -769,7 +769,7 @@ vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsge_vv_i64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m4_b16_m( @@ -778,7 +778,7 @@ vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m8_b8_m( @@ -787,7 +787,7 @@ vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsge_vv_i64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsge_vv_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m8_b8_m( @@ -796,6 +796,6 @@ vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsge_vx_i64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgeu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgeu.c index 
0fa09b0f825f..5379f9e12f03 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgeu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgeu.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsgeu_vv_u8mf8_b64(op1, op2, vl); + return __riscv_vmsgeu_vv_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf8_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmsgeu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8mf8_b64(op1, op2, vl); + return __riscv_vmsgeu_vx_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf4_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmsgeu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsgeu_vv_u8mf4_b32(op1, op2, vl); + return __riscv_vmsgeu_vv_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf4_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmsgeu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8mf4_b32(op1, op2, vl); + return __riscv_vmsgeu_vx_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf2_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmsgeu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsgeu_vv_u8mf2_b16(op1, op2, vl); + return __riscv_vmsgeu_vv_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf2_b16( @@ -58,7 +58,7 @@ vbool16_t 
test_vmsgeu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8mf2_b16(op1, op2, vl); + return __riscv_vmsgeu_vx_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m1_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmsgeu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsgeu_vv_u8m1_b8(op1, op2, vl); + return __riscv_vmsgeu_vv_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m1_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmsgeu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m1_b8(op1, op2, vl); + return __riscv_vmsgeu_vx_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m2_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmsgeu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsgeu_vv_u8m2_b4(op1, op2, vl); + return __riscv_vmsgeu_vv_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m2_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmsgeu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m2_b4(op1, op2, vl); + return __riscv_vmsgeu_vx_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m4_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmsgeu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgeu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsgeu_vv_u8m4_b2(op1, op2, vl); + return 
__riscv_vmsgeu_vv_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m4_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmsgeu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgeu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m4_b2(op1, op2, vl); + return __riscv_vmsgeu_vx_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m8_b1( @@ -121,7 +121,7 @@ vbool2_t test_vmsgeu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgeu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsgeu_vv_u8m8_b1(op1, op2, vl); + return __riscv_vmsgeu_vv_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m8_b1( @@ -130,7 +130,7 @@ vbool1_t test_vmsgeu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgeu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m8_b1(op1, op2, vl); + return __riscv_vmsgeu_vx_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf4_b64( @@ -139,7 +139,7 @@ vbool1_t test_vmsgeu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsgeu_vv_u16mf4_b64(op1, op2, vl); + return __riscv_vmsgeu_vv_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf4_b64( @@ -148,7 +148,7 @@ vbool64_t test_vmsgeu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16mf4_b64(op1, op2, vl); + return __riscv_vmsgeu_vx_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf2_b32( @@ -157,7 +157,7 @@ vbool64_t test_vmsgeu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] 
// vbool32_t test_vmsgeu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsgeu_vv_u16mf2_b32(op1, op2, vl); + return __riscv_vmsgeu_vv_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf2_b32( @@ -166,7 +166,7 @@ vbool32_t test_vmsgeu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16mf2_b32(op1, op2, vl); + return __riscv_vmsgeu_vx_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m1_b16( @@ -175,7 +175,7 @@ vbool32_t test_vmsgeu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsgeu_vv_u16m1_b16(op1, op2, vl); + return __riscv_vmsgeu_vv_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m1_b16( @@ -184,7 +184,7 @@ vbool16_t test_vmsgeu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m1_b16(op1, op2, vl); + return __riscv_vmsgeu_vx_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m2_b8( @@ -193,7 +193,7 @@ vbool16_t test_vmsgeu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsgeu_vv_u16m2_b8(op1, op2, vl); + return __riscv_vmsgeu_vv_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m2_b8( @@ -202,7 +202,7 @@ vbool8_t test_vmsgeu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m2_b8(op1, op2, vl); + return __riscv_vmsgeu_vx_u16m2_b8(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m4_b4( @@ -211,7 +211,7 @@ vbool8_t test_vmsgeu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsgeu_vv_u16m4_b4(op1, op2, vl); + return __riscv_vmsgeu_vv_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m4_b4( @@ -220,7 +220,7 @@ vbool4_t test_vmsgeu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m4_b4(op1, op2, vl); + return __riscv_vmsgeu_vx_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m8_b2( @@ -229,7 +229,7 @@ vbool4_t test_vmsgeu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgeu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsgeu_vv_u16m8_b2(op1, op2, vl); + return __riscv_vmsgeu_vv_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m8_b2( @@ -238,7 +238,7 @@ vbool2_t test_vmsgeu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgeu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m8_b2(op1, op2, vl); + return __riscv_vmsgeu_vx_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64( @@ -247,7 +247,7 @@ vbool2_t test_vmsgeu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsgeu_vv_u32mf2_b64(op1, op2, vl); + return __riscv_vmsgeu_vv_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64( @@ -256,7 +256,7 @@ vbool64_t test_vmsgeu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t 
test_vmsgeu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32mf2_b64(op1, op2, vl); + return __riscv_vmsgeu_vx_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m1_b32( @@ -265,7 +265,7 @@ vbool64_t test_vmsgeu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsgeu_vv_u32m1_b32(op1, op2, vl); + return __riscv_vmsgeu_vv_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m1_b32( @@ -274,7 +274,7 @@ vbool32_t test_vmsgeu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m1_b32(op1, op2, vl); + return __riscv_vmsgeu_vx_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m2_b16( @@ -283,7 +283,7 @@ vbool32_t test_vmsgeu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsgeu_vv_u32m2_b16(op1, op2, vl); + return __riscv_vmsgeu_vv_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m2_b16( @@ -292,7 +292,7 @@ vbool16_t test_vmsgeu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m2_b16(op1, op2, vl); + return __riscv_vmsgeu_vx_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m4_b8( @@ -301,7 +301,7 @@ vbool16_t test_vmsgeu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsgeu_vv_u32m4_b8(op1, op2, vl); + return __riscv_vmsgeu_vv_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsgeu_vx_u32m4_b8( @@ -310,7 +310,7 @@ vbool8_t test_vmsgeu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m4_b8(op1, op2, vl); + return __riscv_vmsgeu_vx_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m8_b4( @@ -319,7 +319,7 @@ vbool8_t test_vmsgeu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsgeu_vv_u32m8_b4(op1, op2, vl); + return __riscv_vmsgeu_vv_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m8_b4( @@ -328,7 +328,7 @@ vbool4_t test_vmsgeu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m8_b4(op1, op2, vl); + return __riscv_vmsgeu_vx_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m1_b64( @@ -337,7 +337,7 @@ vbool4_t test_vmsgeu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsgeu_vv_u64m1_b64(op1, op2, vl); + return __riscv_vmsgeu_vv_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m1_b64( @@ -346,7 +346,7 @@ vbool64_t test_vmsgeu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m1_b64(op1, op2, vl); + return __riscv_vmsgeu_vx_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m2_b32( @@ -355,7 +355,7 @@ vbool64_t test_vmsgeu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vv_u64m2_b32(vuint64m2_t 
op1, vuint64m2_t op2, size_t vl) { - return vmsgeu_vv_u64m2_b32(op1, op2, vl); + return __riscv_vmsgeu_vv_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m2_b32( @@ -364,7 +364,7 @@ vbool32_t test_vmsgeu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m2_b32(op1, op2, vl); + return __riscv_vmsgeu_vx_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m4_b16( @@ -373,7 +373,7 @@ vbool32_t test_vmsgeu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsgeu_vv_u64m4_b16(op1, op2, vl); + return __riscv_vmsgeu_vv_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m4_b16( @@ -382,7 +382,7 @@ vbool16_t test_vmsgeu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m4_b16(op1, op2, vl); + return __riscv_vmsgeu_vx_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m8_b8( @@ -391,7 +391,7 @@ vbool16_t test_vmsgeu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsgeu_vv_u64m8_b8(op1, op2, vl); + return __riscv_vmsgeu_vv_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m8_b8( @@ -400,7 +400,7 @@ vbool8_t test_vmsgeu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m8_b8(op1, op2, vl); + return __riscv_vmsgeu_vx_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf8_b64_m( @@ -409,7 +409,7 
@@ vbool8_t test_vmsgeu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsgeu_vv_u8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf8_b64_m( @@ -418,7 +418,7 @@ vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf4_b32_m( @@ -427,7 +427,7 @@ vbool64_t test_vmsgeu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsgeu_vv_u8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf4_b32_m( @@ -436,7 +436,7 @@ vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf2_b16_m( @@ -445,7 +445,7 @@ vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsgeu_vv_u8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf2_b16_m( @@ -454,7 +454,7 @@ 
vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m1_b8_m( @@ -463,7 +463,7 @@ vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsgeu_vv_u8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m1_b8_m( @@ -472,7 +472,7 @@ vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m2_b4_m( @@ -481,7 +481,7 @@ vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsgeu_vv_u8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m2_b4_m( @@ -490,7 +490,7 @@ vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m4_b2_m( @@ -499,7 +499,7 @@ vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, 
vuint8m2_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsgeu_vv_u8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m4_b2_m( @@ -508,7 +508,7 @@ vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m8_b1_m( @@ -517,7 +517,7 @@ vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsgeu_vv_u8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m8_b1_m( @@ -526,7 +526,7 @@ vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf4_b64_m( @@ -535,7 +535,7 @@ vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsgeu_vv_u16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf4_b64_m( @@ -544,7 +544,7 @@ vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16m // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf2_b32_m( @@ -553,7 +553,7 @@ vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsgeu_vv_u16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf2_b32_m( @@ -562,7 +562,7 @@ vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m1_b16_m( @@ -571,7 +571,7 @@ vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsgeu_vv_u16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m1_b16_m( @@ -580,7 +580,7 @@ vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m2_b8_m( @@ -589,7 +589,7 @@ vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, 
uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsgeu_vv_u16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m2_b8_m( @@ -598,7 +598,7 @@ vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m4_b4_m( @@ -607,7 +607,7 @@ vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsgeu_vv_u16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m4_b4_m( @@ -616,7 +616,7 @@ vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m8_b2_m( @@ -625,7 +625,7 @@ vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsgeu_vv_u16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m8_b2_m( @@ -634,7 +634,7 @@ vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o // CHECK-RV64-NEXT: 
ret [[TMP0]] // vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_m( @@ -643,7 +643,7 @@ vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsgeu_vv_u32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_m( @@ -652,7 +652,7 @@ vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m1_b32_m( @@ -661,7 +661,7 @@ vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsgeu_vv_u32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m1_b32_m( @@ -670,7 +670,7 @@ vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m2_b16_m( @@ -679,7 +679,7 @@ vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t o // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsgeu_vv_u32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m2_b16_m( @@ -688,7 +688,7 @@ vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m4_b8_m( @@ -697,7 +697,7 @@ vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsgeu_vv_u32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m4_b8_m( @@ -706,7 +706,7 @@ vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m8_b4_m( @@ -715,7 +715,7 @@ vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsgeu_vv_u32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m8_b4_m( @@ -724,7 +724,7 @@ vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o // CHECK-RV64-NEXT: 
ret [[TMP0]] // vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m1_b64_m( @@ -733,7 +733,7 @@ vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsgeu_vv_u64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m1_b64_m( @@ -742,7 +742,7 @@ vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m2_b32_m( @@ -751,7 +751,7 @@ vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsgeu_vv_u64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m2_b32_m( @@ -760,7 +760,7 @@ vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m4_b16_m( @@ -769,7 +769,7 @@ vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t o // CHECK-RV64-NEXT: ret 
[[TMP0]] // vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsgeu_vv_u64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m4_b16_m( @@ -778,7 +778,7 @@ vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m8_b8_m( @@ -787,7 +787,7 @@ vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsgeu_vv_u64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vv_u64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m8_b8_m( @@ -796,6 +796,6 @@ vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsgeu_vx_u64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgt.c index 25875b6adba7..1b597fbbe174 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsgt_vv_i8mf8_b64(op1, 
op2, vl); + return __riscv_vmsgt_vv_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmsgt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8mf8_b64(op1, op2, vl); + return __riscv_vmsgt_vx_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf4_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmsgt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsgt_vv_i8mf4_b32(op1, op2, vl); + return __riscv_vmsgt_vv_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmsgt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8mf4_b32(op1, op2, vl); + return __riscv_vmsgt_vx_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf2_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmsgt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsgt_vv_i8mf2_b16(op1, op2, vl); + return __riscv_vmsgt_vv_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmsgt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8mf2_b16(op1, op2, vl); + return __riscv_vmsgt_vx_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m1_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmsgt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool8_t test_vmsgt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsgt_vv_i8m1_b8(op1, op2, vl); + return __riscv_vmsgt_vv_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmsgt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m1_b8(op1, op2, vl); + return __riscv_vmsgt_vx_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m2_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmsgt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsgt_vv_i8m2_b4(op1, op2, vl); + return __riscv_vmsgt_vv_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmsgt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m2_b4(op1, op2, vl); + return __riscv_vmsgt_vx_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m4_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmsgt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsgt_vv_i8m4_b2(op1, op2, vl); + return __riscv_vmsgt_vv_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmsgt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m4_b2(op1, op2, vl); + return __riscv_vmsgt_vx_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m8_b1( @@ -121,7 +121,7 @@ vbool2_t test_vmsgt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsgt_vv_i8m8_b1(op1, op2, vl); + return __riscv_vmsgt_vv_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1( @@ -130,7 +130,7 @@ vbool1_t test_vmsgt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m8_b1(op1, op2, vl); + return __riscv_vmsgt_vx_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf4_b64( @@ -139,7 +139,7 @@ vbool1_t test_vmsgt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsgt_vv_i16mf4_b64(op1, op2, vl); + return __riscv_vmsgt_vv_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64( @@ -148,7 +148,7 @@ vbool64_t test_vmsgt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16mf4_b64(op1, op2, vl); + return __riscv_vmsgt_vx_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf2_b32( @@ -157,7 +157,7 @@ vbool64_t test_vmsgt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsgt_vv_i16mf2_b32(op1, op2, vl); + return __riscv_vmsgt_vv_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32( @@ -166,7 +166,7 @@ vbool32_t test_vmsgt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16mf2_b32(op1, op2, vl); + return __riscv_vmsgt_vx_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsgt_vv_i16m1_b16( @@ -175,7 +175,7 @@ vbool32_t test_vmsgt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsgt_vv_i16m1_b16(op1, op2, vl); + return __riscv_vmsgt_vv_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16( @@ -184,7 +184,7 @@ vbool16_t test_vmsgt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m1_b16(op1, op2, vl); + return __riscv_vmsgt_vx_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m2_b8( @@ -193,7 +193,7 @@ vbool16_t test_vmsgt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsgt_vv_i16m2_b8(op1, op2, vl); + return __riscv_vmsgt_vv_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8( @@ -202,7 +202,7 @@ vbool8_t test_vmsgt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m2_b8(op1, op2, vl); + return __riscv_vmsgt_vx_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m4_b4( @@ -211,7 +211,7 @@ vbool8_t test_vmsgt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsgt_vv_i16m4_b4(op1, op2, vl); + return __riscv_vmsgt_vv_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4( @@ -220,7 +220,7 @@ vbool4_t test_vmsgt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return 
vmsgt_vx_i16m4_b4(op1, op2, vl); + return __riscv_vmsgt_vx_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m8_b2( @@ -229,7 +229,7 @@ vbool4_t test_vmsgt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsgt_vv_i16m8_b2(op1, op2, vl); + return __riscv_vmsgt_vv_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2( @@ -238,7 +238,7 @@ vbool2_t test_vmsgt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m8_b2(op1, op2, vl); + return __riscv_vmsgt_vx_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64( @@ -247,7 +247,7 @@ vbool2_t test_vmsgt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsgt_vv_i32mf2_b64(op1, op2, vl); + return __riscv_vmsgt_vv_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64( @@ -256,7 +256,7 @@ vbool64_t test_vmsgt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32mf2_b64(op1, op2, vl); + return __riscv_vmsgt_vx_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m1_b32( @@ -265,7 +265,7 @@ vbool64_t test_vmsgt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsgt_vv_i32m1_b32(op1, op2, vl); + return __riscv_vmsgt_vv_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32( @@ -274,7 +274,7 @@ vbool32_t test_vmsgt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { 
// CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m1_b32(op1, op2, vl); + return __riscv_vmsgt_vx_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m2_b16( @@ -283,7 +283,7 @@ vbool32_t test_vmsgt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsgt_vv_i32m2_b16(op1, op2, vl); + return __riscv_vmsgt_vv_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16( @@ -292,7 +292,7 @@ vbool16_t test_vmsgt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m2_b16(op1, op2, vl); + return __riscv_vmsgt_vx_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m4_b8( @@ -301,7 +301,7 @@ vbool16_t test_vmsgt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsgt_vv_i32m4_b8(op1, op2, vl); + return __riscv_vmsgt_vv_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8( @@ -310,7 +310,7 @@ vbool8_t test_vmsgt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m4_b8(op1, op2, vl); + return __riscv_vmsgt_vx_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m8_b4( @@ -319,7 +319,7 @@ vbool8_t test_vmsgt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsgt_vv_i32m8_b4(op1, op2, vl); + return __riscv_vmsgt_vv_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsgt_vx_i32m8_b4( @@ -328,7 +328,7 @@ vbool4_t test_vmsgt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m8_b4(op1, op2, vl); + return __riscv_vmsgt_vx_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m1_b64( @@ -337,7 +337,7 @@ vbool4_t test_vmsgt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsgt_vv_i64m1_b64(op1, op2, vl); + return __riscv_vmsgt_vv_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64( @@ -346,7 +346,7 @@ vbool64_t test_vmsgt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m1_b64(op1, op2, vl); + return __riscv_vmsgt_vx_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m2_b32( @@ -355,7 +355,7 @@ vbool64_t test_vmsgt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsgt_vv_i64m2_b32(op1, op2, vl); + return __riscv_vmsgt_vv_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32( @@ -364,7 +364,7 @@ vbool32_t test_vmsgt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m2_b32(op1, op2, vl); + return __riscv_vmsgt_vx_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m4_b16( @@ -373,7 +373,7 @@ vbool32_t test_vmsgt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - 
return vmsgt_vv_i64m4_b16(op1, op2, vl); + return __riscv_vmsgt_vv_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16( @@ -382,7 +382,7 @@ vbool16_t test_vmsgt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m4_b16(op1, op2, vl); + return __riscv_vmsgt_vx_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m8_b8( @@ -391,7 +391,7 @@ vbool16_t test_vmsgt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsgt_vv_i64m8_b8(op1, op2, vl); + return __riscv_vmsgt_vv_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8( @@ -400,7 +400,7 @@ vbool8_t test_vmsgt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m8_b8(op1, op2, vl); + return __riscv_vmsgt_vx_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf8_b64_m( @@ -409,7 +409,7 @@ vbool8_t test_vmsgt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsgt_vv_i8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64_m( @@ -418,7 +418,7 @@ vbool64_t test_vmsgt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf4_b32_m( @@ -427,7 +427,7 @@ 
vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsgt_vv_i8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32_m( @@ -436,7 +436,7 @@ vbool32_t test_vmsgt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf2_b16_m( @@ -445,7 +445,7 @@ vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsgt_vv_i8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16_m( @@ -454,7 +454,7 @@ vbool16_t test_vmsgt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m1_b8_m( @@ -463,7 +463,7 @@ vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsgt_vv_i8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8_m( @@ -472,7 +472,7 @@ vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t mask, 
vint8m1_t op1, vint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m2_b4_m( @@ -481,7 +481,7 @@ vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsgt_vv_i8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4_m( @@ -490,7 +490,7 @@ vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m4_b2_m( @@ -499,7 +499,7 @@ vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsgt_vv_i8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2_m( @@ -508,7 +508,7 @@ vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m8_b1_m( @@ -517,7 +517,7 @@ vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t 
test_vmsgt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsgt_vv_i8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1_m( @@ -526,7 +526,7 @@ vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf4_b64_m( @@ -535,7 +535,7 @@ vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsgt_vv_i16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64_m( @@ -544,7 +544,7 @@ vbool64_t test_vmsgt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf2_b32_m( @@ -553,7 +553,7 @@ vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsgt_vv_i16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32_m( @@ -562,7 +562,7 @@ vbool32_t test_vmsgt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t 
test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m1_b16_m( @@ -571,7 +571,7 @@ vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsgt_vv_i16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16_m( @@ -580,7 +580,7 @@ vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m2_b8_m( @@ -589,7 +589,7 @@ vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsgt_vv_i16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8_m( @@ -598,7 +598,7 @@ vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m4_b4_m( @@ -607,7 +607,7 @@ vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t mask, 
vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsgt_vv_i16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4_m( @@ -616,7 +616,7 @@ vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m8_b2_m( @@ -625,7 +625,7 @@ vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsgt_vv_i16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2_m( @@ -634,7 +634,7 @@ vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_m( @@ -643,7 +643,7 @@ vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsgt_vv_i32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_m( @@ -652,7 +652,7 @@ vbool64_t test_vmsgt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { 
- return vmsgt_vx_i32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m1_b32_m( @@ -661,7 +661,7 @@ vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsgt_vv_i32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32_m( @@ -670,7 +670,7 @@ vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m2_b16_m( @@ -679,7 +679,7 @@ vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsgt_vv_i32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16_m( @@ -688,7 +688,7 @@ vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m4_b8_m( @@ -697,7 +697,7 @@ vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return 
vmsgt_vv_i32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8_m( @@ -706,7 +706,7 @@ vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m8_b4_m( @@ -715,7 +715,7 @@ vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsgt_vv_i32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4_m( @@ -724,7 +724,7 @@ vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m1_b64_m( @@ -733,7 +733,7 @@ vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsgt_vv_i64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64_m( @@ -742,7 +742,7 @@ vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m1_b64_m(mask, op1, op2, vl); + return 
__riscv_vmsgt_vx_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m2_b32_m( @@ -751,7 +751,7 @@ vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsgt_vv_i64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32_m( @@ -760,7 +760,7 @@ vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m4_b16_m( @@ -769,7 +769,7 @@ vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsgt_vv_i64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16_m( @@ -778,7 +778,7 @@ vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m8_b8_m( @@ -787,7 +787,7 @@ vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsgt_vv_i64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsgt_vv_i64m8_b8_m(mask, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8_m( @@ -796,6 +796,6 @@ vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsgt_vx_i64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgtu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgtu.c index f8d4079364d1..1e46342fd9fd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgtu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsgtu.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsgtu_vv_u8mf8_b64(op1, op2, vl); + return __riscv_vmsgtu_vv_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmsgtu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8mf8_b64(op1, op2, vl); + return __riscv_vmsgtu_vx_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf4_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmsgtu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsgtu_vv_u8mf4_b32(op1, op2, vl); + return __riscv_vmsgtu_vv_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmsgtu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t 
test_vmsgtu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8mf4_b32(op1, op2, vl); + return __riscv_vmsgtu_vx_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf2_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmsgtu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsgtu_vv_u8mf2_b16(op1, op2, vl); + return __riscv_vmsgtu_vv_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmsgtu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8mf2_b16(op1, op2, vl); + return __riscv_vmsgtu_vx_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m1_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmsgtu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsgtu_vv_u8m1_b8(op1, op2, vl); + return __riscv_vmsgtu_vv_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmsgtu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m1_b8(op1, op2, vl); + return __riscv_vmsgtu_vx_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m2_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmsgtu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsgtu_vv_u8m2_b4(op1, op2, vl); + return __riscv_vmsgtu_vv_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4( @@ -94,7 +94,7 @@ vbool4_t 
test_vmsgtu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m2_b4(op1, op2, vl); + return __riscv_vmsgtu_vx_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m4_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmsgtu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsgtu_vv_u8m4_b2(op1, op2, vl); + return __riscv_vmsgtu_vv_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmsgtu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m4_b2(op1, op2, vl); + return __riscv_vmsgtu_vx_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m8_b1( @@ -121,7 +121,7 @@ vbool2_t test_vmsgtu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgtu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsgtu_vv_u8m8_b1(op1, op2, vl); + return __riscv_vmsgtu_vv_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1( @@ -130,7 +130,7 @@ vbool1_t test_vmsgtu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgtu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m8_b1(op1, op2, vl); + return __riscv_vmsgtu_vx_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf4_b64( @@ -139,7 +139,7 @@ vbool1_t test_vmsgtu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsgtu_vv_u16mf4_b64(op1, op2, vl); + return 
__riscv_vmsgtu_vv_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64( @@ -148,7 +148,7 @@ vbool64_t test_vmsgtu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16mf4_b64(op1, op2, vl); + return __riscv_vmsgtu_vx_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf2_b32( @@ -157,7 +157,7 @@ vbool64_t test_vmsgtu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsgtu_vv_u16mf2_b32(op1, op2, vl); + return __riscv_vmsgtu_vv_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32( @@ -166,7 +166,7 @@ vbool32_t test_vmsgtu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16mf2_b32(op1, op2, vl); + return __riscv_vmsgtu_vx_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m1_b16( @@ -175,7 +175,7 @@ vbool32_t test_vmsgtu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsgtu_vv_u16m1_b16(op1, op2, vl); + return __riscv_vmsgtu_vv_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16( @@ -184,7 +184,7 @@ vbool16_t test_vmsgtu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m1_b16(op1, op2, vl); + return __riscv_vmsgtu_vx_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m2_b8( @@ -193,7 +193,7 @@ vbool16_t test_vmsgtu_vx_u16m1_b16(vuint16m1_t op1, 
uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsgtu_vv_u16m2_b8(op1, op2, vl); + return __riscv_vmsgtu_vv_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8( @@ -202,7 +202,7 @@ vbool8_t test_vmsgtu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m2_b8(op1, op2, vl); + return __riscv_vmsgtu_vx_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m4_b4( @@ -211,7 +211,7 @@ vbool8_t test_vmsgtu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsgtu_vv_u16m4_b4(op1, op2, vl); + return __riscv_vmsgtu_vv_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4( @@ -220,7 +220,7 @@ vbool4_t test_vmsgtu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m4_b4(op1, op2, vl); + return __riscv_vmsgtu_vx_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m8_b2( @@ -229,7 +229,7 @@ vbool4_t test_vmsgtu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsgtu_vv_u16m8_b2(op1, op2, vl); + return __riscv_vmsgtu_vv_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2( @@ -238,7 +238,7 @@ vbool2_t test_vmsgtu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m8_b2(op1, op2, vl); + return 
__riscv_vmsgtu_vx_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64( @@ -247,7 +247,7 @@ vbool2_t test_vmsgtu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsgtu_vv_u32mf2_b64(op1, op2, vl); + return __riscv_vmsgtu_vv_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64( @@ -256,7 +256,7 @@ vbool64_t test_vmsgtu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32mf2_b64(op1, op2, vl); + return __riscv_vmsgtu_vx_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m1_b32( @@ -265,7 +265,7 @@ vbool64_t test_vmsgtu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsgtu_vv_u32m1_b32(op1, op2, vl); + return __riscv_vmsgtu_vv_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32( @@ -274,7 +274,7 @@ vbool32_t test_vmsgtu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m1_b32(op1, op2, vl); + return __riscv_vmsgtu_vx_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m2_b16( @@ -283,7 +283,7 @@ vbool32_t test_vmsgtu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsgtu_vv_u32m2_b16(op1, op2, vl); + return __riscv_vmsgtu_vv_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16( @@ -292,7 +292,7 @@ vbool16_t test_vmsgtu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t 
op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m2_b16(op1, op2, vl); + return __riscv_vmsgtu_vx_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m4_b8( @@ -301,7 +301,7 @@ vbool16_t test_vmsgtu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsgtu_vv_u32m4_b8(op1, op2, vl); + return __riscv_vmsgtu_vv_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8( @@ -310,7 +310,7 @@ vbool8_t test_vmsgtu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m4_b8(op1, op2, vl); + return __riscv_vmsgtu_vx_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m8_b4( @@ -319,7 +319,7 @@ vbool8_t test_vmsgtu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsgtu_vv_u32m8_b4(op1, op2, vl); + return __riscv_vmsgtu_vv_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4( @@ -328,7 +328,7 @@ vbool4_t test_vmsgtu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m8_b4(op1, op2, vl); + return __riscv_vmsgtu_vx_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m1_b64( @@ -337,7 +337,7 @@ vbool4_t test_vmsgtu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsgtu_vv_u64m1_b64(op1, op2, vl); + return 
__riscv_vmsgtu_vv_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64( @@ -346,7 +346,7 @@ vbool64_t test_vmsgtu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m1_b64(op1, op2, vl); + return __riscv_vmsgtu_vx_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m2_b32( @@ -355,7 +355,7 @@ vbool64_t test_vmsgtu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsgtu_vv_u64m2_b32(op1, op2, vl); + return __riscv_vmsgtu_vv_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32( @@ -364,7 +364,7 @@ vbool32_t test_vmsgtu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m2_b32(op1, op2, vl); + return __riscv_vmsgtu_vx_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m4_b16( @@ -373,7 +373,7 @@ vbool32_t test_vmsgtu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsgtu_vv_u64m4_b16(op1, op2, vl); + return __riscv_vmsgtu_vv_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16( @@ -382,7 +382,7 @@ vbool16_t test_vmsgtu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m4_b16(op1, op2, vl); + return __riscv_vmsgtu_vx_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m8_b8( @@ -391,7 +391,7 @@ vbool16_t test_vmsgtu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { 
// CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsgtu_vv_u64m8_b8(op1, op2, vl); + return __riscv_vmsgtu_vv_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8( @@ -400,7 +400,7 @@ vbool8_t test_vmsgtu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m8_b8(op1, op2, vl); + return __riscv_vmsgtu_vx_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf8_b64_m( @@ -409,7 +409,7 @@ vbool8_t test_vmsgtu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsgtu_vv_u8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64_m( @@ -418,7 +418,7 @@ vbool64_t test_vmsgtu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf4_b32_m( @@ -427,7 +427,7 @@ vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsgtu_vv_u8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32_m( @@ -436,7 +436,7 @@ vbool32_t test_vmsgtu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t 
mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf2_b16_m( @@ -445,7 +445,7 @@ vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsgtu_vv_u8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16_m( @@ -454,7 +454,7 @@ vbool16_t test_vmsgtu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m1_b8_m( @@ -463,7 +463,7 @@ vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsgtu_vv_u8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8_m( @@ -472,7 +472,7 @@ vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m2_b4_m( @@ -481,7 +481,7 @@ vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t 
op2, size_t vl) { - return vmsgtu_vv_u8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4_m( @@ -490,7 +490,7 @@ vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m4_b2_m( @@ -499,7 +499,7 @@ vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsgtu_vv_u8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2_m( @@ -508,7 +508,7 @@ vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m8_b1_m( @@ -517,7 +517,7 @@ vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsgtu_vv_u8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1_m( @@ -526,7 +526,7 @@ vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m8_b1_m(mask, op1, op2, 
vl); + return __riscv_vmsgtu_vx_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf4_b64_m( @@ -535,7 +535,7 @@ vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsgtu_vv_u16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64_m( @@ -544,7 +544,7 @@ vbool64_t test_vmsgtu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf2_b32_m( @@ -553,7 +553,7 @@ vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsgtu_vv_u16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32_m( @@ -562,7 +562,7 @@ vbool32_t test_vmsgtu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m1_b16_m( @@ -571,7 +571,7 @@ vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return 
vmsgtu_vv_u16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16_m( @@ -580,7 +580,7 @@ vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m2_b8_m( @@ -589,7 +589,7 @@ vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsgtu_vv_u16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8_m( @@ -598,7 +598,7 @@ vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m4_b4_m( @@ -607,7 +607,7 @@ vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsgtu_vv_u16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4_m( @@ -616,7 +616,7 @@ vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return 
vmsgtu_vx_u16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m8_b2_m( @@ -625,7 +625,7 @@ vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsgtu_vv_u16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2_m( @@ -634,7 +634,7 @@ vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_m( @@ -643,7 +643,7 @@ vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsgtu_vv_u32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_m( @@ -652,7 +652,7 @@ vbool64_t test_vmsgtu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m1_b32_m( @@ -661,7 +661,7 @@ vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return 
vmsgtu_vv_u32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32_m( @@ -670,7 +670,7 @@ vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m2_b16_m( @@ -679,7 +679,7 @@ vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsgtu_vv_u32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16_m( @@ -688,7 +688,7 @@ vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m4_b8_m( @@ -697,7 +697,7 @@ vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsgtu_vv_u32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8_m( @@ -706,7 +706,7 @@ vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return 
vmsgtu_vx_u32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m8_b4_m( @@ -715,7 +715,7 @@ vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsgtu_vv_u32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4_m( @@ -724,7 +724,7 @@ vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m1_b64_m( @@ -733,7 +733,7 @@ vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsgtu_vv_u64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64_m( @@ -742,7 +742,7 @@ vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m2_b32_m( @@ -751,7 +751,7 @@ vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return 
vmsgtu_vv_u64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32_m( @@ -760,7 +760,7 @@ vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m4_b16_m( @@ -769,7 +769,7 @@ vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsgtu_vv_u64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16_m( @@ -778,7 +778,7 @@ vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m8_b8_m( @@ -787,7 +787,7 @@ vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsgtu_vv_u64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vv_u64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8_m( @@ -796,6 +796,6 @@ vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return 
vmsgtu_vx_u64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsgtu_vx_u64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsif.c index cd3bb94d22e2..23806d9eff07 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsif.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsif.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) { - return vmsif_m_b1(op1, vl); + return __riscv_vmsif_m_b1(op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) { - return vmsif_m_b2(op1, vl); + return __riscv_vmsif_m_b2(op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) { - return vmsif_m_b4(op1, vl); + return __riscv_vmsif_m_b4(op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) { - return vmsif_m_b8(op1, vl); + return __riscv_vmsif_m_b8(op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b16( @@ -48,7 +48,7 @@ vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) { - return vmsif_m_b16(op1, vl); + return __riscv_vmsif_m_b16(op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b32( @@ -57,7 +57,7 @@ vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) { - return 
vmsif_m_b32(op1, vl); + return __riscv_vmsif_m_b32(op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b64( @@ -66,7 +66,7 @@ vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) { - return vmsif_m_b64(op1, vl); + return __riscv_vmsif_m_b64(op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b1_m( @@ -75,7 +75,7 @@ vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return vmsif_m_b1_m(mask, op1, vl); + return __riscv_vmsif_m_b1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b2_m( @@ -84,7 +84,7 @@ vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return vmsif_m_b2_m(mask, op1, vl); + return __riscv_vmsif_m_b2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b4_m( @@ -93,7 +93,7 @@ vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return vmsif_m_b4_m(mask, op1, vl); + return __riscv_vmsif_m_b4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b8_m( @@ -102,7 +102,7 @@ vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return vmsif_m_b8_m(mask, op1, vl); + return __riscv_vmsif_m_b8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b16_m( @@ -111,7 +111,7 @@ vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return vmsif_m_b16_m(mask, op1, vl); + return __riscv_vmsif_m_b16_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b32_m( @@ -120,7 +120,7 @@ 
vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return vmsif_m_b32_m(mask, op1, vl); + return __riscv_vmsif_m_b32_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b64_m( @@ -129,6 +129,6 @@ vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return vmsif_m_b64_m(mask, op1, vl); + return __riscv_vmsif_m_b64_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsle.c index b067a7730d86..7b42d194a4b6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsle.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsle_vv_i8mf8_b64(op1, op2, vl); + return __riscv_vmsle_vv_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8mf8_b64(op1, op2, vl); + return __riscv_vmsle_vx_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsle_vv_i8mf4_b32(op1, op2, vl); + return __riscv_vmsle_vv_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32( 
@@ -40,7 +40,7 @@ vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8mf4_b32(op1, op2, vl); + return __riscv_vmsle_vx_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsle_vv_i8mf2_b16(op1, op2, vl); + return __riscv_vmsle_vv_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8mf2_b16(op1, op2, vl); + return __riscv_vmsle_vx_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsle_vv_i8m1_b8(op1, op2, vl); + return __riscv_vmsle_vv_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m1_b8(op1, op2, vl); + return __riscv_vmsle_vx_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsle_vv_i8m2_b4(op1, op2, vl); + return 
__riscv_vmsle_vv_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m2_b4(op1, op2, vl); + return __riscv_vmsle_vx_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsle_vv_i8m4_b2(op1, op2, vl); + return __riscv_vmsle_vv_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m4_b2(op1, op2, vl); + return __riscv_vmsle_vx_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1( @@ -121,7 +121,7 @@ vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsle_vv_i8m8_b1(op1, op2, vl); + return __riscv_vmsle_vv_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1( @@ -130,7 +130,7 @@ vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m8_b1(op1, op2, vl); + return __riscv_vmsle_vx_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64( @@ -139,7 +139,7 @@ vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return 
vmsle_vv_i16mf4_b64(op1, op2, vl); + return __riscv_vmsle_vv_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64( @@ -148,7 +148,7 @@ vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16mf4_b64(op1, op2, vl); + return __riscv_vmsle_vx_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32( @@ -157,7 +157,7 @@ vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsle_vv_i16mf2_b32(op1, op2, vl); + return __riscv_vmsle_vv_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32( @@ -166,7 +166,7 @@ vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16mf2_b32(op1, op2, vl); + return __riscv_vmsle_vx_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16( @@ -175,7 +175,7 @@ vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsle_vv_i16m1_b16(op1, op2, vl); + return __riscv_vmsle_vv_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16( @@ -184,7 +184,7 @@ vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m1_b16(op1, op2, vl); + return __riscv_vmsle_vx_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8( @@ -193,7 +193,7 @@ vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t op1, 
int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsle_vv_i16m2_b8(op1, op2, vl); + return __riscv_vmsle_vv_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8( @@ -202,7 +202,7 @@ vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m2_b8(op1, op2, vl); + return __riscv_vmsle_vx_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4( @@ -211,7 +211,7 @@ vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsle_vv_i16m4_b4(op1, op2, vl); + return __riscv_vmsle_vv_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4( @@ -220,7 +220,7 @@ vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m4_b4(op1, op2, vl); + return __riscv_vmsle_vx_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2( @@ -229,7 +229,7 @@ vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsle_vv_i16m8_b2(op1, op2, vl); + return __riscv_vmsle_vv_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2( @@ -238,7 +238,7 @@ vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m8_b2(op1, op2, vl); + return __riscv_vmsle_vx_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsle_vv_i32mf2_b64( @@ -247,7 +247,7 @@ vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsle_vv_i32mf2_b64(op1, op2, vl); + return __riscv_vmsle_vv_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64( @@ -256,7 +256,7 @@ vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32mf2_b64(op1, op2, vl); + return __riscv_vmsle_vx_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32( @@ -265,7 +265,7 @@ vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsle_vv_i32m1_b32(op1, op2, vl); + return __riscv_vmsle_vv_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32( @@ -274,7 +274,7 @@ vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m1_b32(op1, op2, vl); + return __riscv_vmsle_vx_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16( @@ -283,7 +283,7 @@ vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsle_vv_i32m2_b16(op1, op2, vl); + return __riscv_vmsle_vv_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16( @@ -292,7 +292,7 @@ vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t op1, 
int32_t op2, size_t vl) { - return vmsle_vx_i32m2_b16(op1, op2, vl); + return __riscv_vmsle_vx_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8( @@ -301,7 +301,7 @@ vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsle_vv_i32m4_b8(op1, op2, vl); + return __riscv_vmsle_vv_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8( @@ -310,7 +310,7 @@ vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m4_b8(op1, op2, vl); + return __riscv_vmsle_vx_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4( @@ -319,7 +319,7 @@ vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsle_vv_i32m8_b4(op1, op2, vl); + return __riscv_vmsle_vv_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4( @@ -328,7 +328,7 @@ vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m8_b4(op1, op2, vl); + return __riscv_vmsle_vx_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64( @@ -337,7 +337,7 @@ vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsle_vv_i64m1_b64(op1, op2, vl); + return __riscv_vmsle_vv_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64( @@ -346,7 +346,7 @@ vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return vmsle_vx_i64m1_b64(op1, op2, vl); + return __riscv_vmsle_vx_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32( @@ -355,7 +355,7 @@ vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsle_vv_i64m2_b32(op1, op2, vl); + return __riscv_vmsle_vv_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32( @@ -364,7 +364,7 @@ vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return vmsle_vx_i64m2_b32(op1, op2, vl); + return __riscv_vmsle_vx_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16( @@ -373,7 +373,7 @@ vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsle_vv_i64m4_b16(op1, op2, vl); + return __riscv_vmsle_vv_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16( @@ -382,7 +382,7 @@ vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return vmsle_vx_i64m4_b16(op1, op2, vl); + return __riscv_vmsle_vx_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8( @@ -391,7 +391,7 @@ vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsle_vv_i64m8_b8(op1, op2, vl); + return __riscv_vmsle_vv_i64m8_b8(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8( @@ -400,7 +400,7 @@ vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return vmsle_vx_i64m8_b8(op1, op2, vl); + return __riscv_vmsle_vx_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64_m( @@ -409,7 +409,7 @@ vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsle_vv_i8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_m( @@ -418,7 +418,7 @@ vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_m( @@ -427,7 +427,7 @@ vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsle_vv_i8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_m( @@ -436,7 +436,7 @@ vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_m( @@ -445,7 +445,7 @@ vbool32_t 
test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsle_vv_i8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_m( @@ -454,7 +454,7 @@ vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_m( @@ -463,7 +463,7 @@ vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsle_vv_i8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_m( @@ -472,7 +472,7 @@ vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_m( @@ -481,7 +481,7 @@ vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsle_vv_i8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_m( @@ -490,7 +490,7 @@ vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si 
// CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2_m( @@ -499,7 +499,7 @@ vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsle_vv_i8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_m( @@ -508,7 +508,7 @@ vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_m( @@ -517,7 +517,7 @@ vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsle_vv_i8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_m( @@ -526,7 +526,7 @@ vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_m( @@ -535,7 +535,7 @@ vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, 
vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsle_vv_i16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_m( @@ -544,7 +544,7 @@ vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_m( @@ -553,7 +553,7 @@ vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsle_vv_i16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_m( @@ -562,7 +562,7 @@ vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_m( @@ -571,7 +571,7 @@ vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsle_vv_i16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_m( @@ -580,7 +580,7 @@ vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t 
op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_m( @@ -589,7 +589,7 @@ vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsle_vv_i16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_m( @@ -598,7 +598,7 @@ vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_m( @@ -607,7 +607,7 @@ vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsle_vv_i16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_m( @@ -616,7 +616,7 @@ vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_m( @@ -625,7 +625,7 @@ vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return 
vmsle_vv_i16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_m( @@ -634,7 +634,7 @@ vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_m( @@ -643,7 +643,7 @@ vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsle_vv_i32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_m( @@ -652,7 +652,7 @@ vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_m( @@ -661,7 +661,7 @@ vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsle_vv_i32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_m( @@ -670,7 +670,7 @@ vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m1_b32_m(mask, 
op1, op2, vl); + return __riscv_vmsle_vx_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_m( @@ -679,7 +679,7 @@ vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsle_vv_i32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_m( @@ -688,7 +688,7 @@ vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_m( @@ -697,7 +697,7 @@ vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsle_vv_i32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_m( @@ -706,7 +706,7 @@ vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_m( @@ -715,7 +715,7 @@ vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsle_vv_i32m8_b4_m(mask, op1, op2, vl); + return 
__riscv_vmsle_vv_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_m( @@ -724,7 +724,7 @@ vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_m( @@ -733,7 +733,7 @@ vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsle_vv_i64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_m( @@ -742,7 +742,7 @@ vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsle_vx_i64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_m( @@ -751,7 +751,7 @@ vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsle_vv_i64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_m( @@ -760,7 +760,7 @@ vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsle_vx_i64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i64m2_b32_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_m( @@ -769,7 +769,7 @@ vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsle_vv_i64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_m( @@ -778,7 +778,7 @@ vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsle_vx_i64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_m( @@ -787,7 +787,7 @@ vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsle_vv_i64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsle_vv_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_m( @@ -796,6 +796,6 @@ vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsle_vx_i64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsle_vx_i64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsleu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsleu.c index dbb2078adbe6..29cd51ac7f63 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsleu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsleu.c @@ 
-13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsleu_vv_u8mf8_b64(op1, op2, vl); + return __riscv_vmsleu_vv_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8mf8_b64(op1, op2, vl); + return __riscv_vmsleu_vx_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsleu_vv_u8mf4_b32(op1, op2, vl); + return __riscv_vmsleu_vv_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8mf4_b32(op1, op2, vl); + return __riscv_vmsleu_vx_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsleu_vv_u8mf2_b16(op1, op2, vl); + return __riscv_vmsleu_vv_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8mf2_b16(op1, op2, vl); + return 
__riscv_vmsleu_vx_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsleu_vv_u8m1_b8(op1, op2, vl); + return __riscv_vmsleu_vv_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m1_b8(op1, op2, vl); + return __riscv_vmsleu_vx_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsleu_vv_u8m2_b4(op1, op2, vl); + return __riscv_vmsleu_vv_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m2_b4(op1, op2, vl); + return __riscv_vmsleu_vx_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsleu_vv_u8m4_b2(op1, op2, vl); + return __riscv_vmsleu_vv_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t 
op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m4_b2(op1, op2, vl); + return __riscv_vmsleu_vx_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1( @@ -121,7 +121,7 @@ vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsleu_vv_u8m8_b1(op1, op2, vl); + return __riscv_vmsleu_vv_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1( @@ -130,7 +130,7 @@ vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m8_b1(op1, op2, vl); + return __riscv_vmsleu_vx_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64( @@ -139,7 +139,7 @@ vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsleu_vv_u16mf4_b64(op1, op2, vl); + return __riscv_vmsleu_vv_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64( @@ -148,7 +148,7 @@ vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16mf4_b64(op1, op2, vl); + return __riscv_vmsleu_vx_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32( @@ -157,7 +157,7 @@ vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsleu_vv_u16mf2_b32(op1, op2, vl); + return __riscv_vmsleu_vv_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32( @@ -166,7 +166,7 @@ vbool32_t 
test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16mf2_b32(op1, op2, vl); + return __riscv_vmsleu_vx_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16( @@ -175,7 +175,7 @@ vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsleu_vv_u16m1_b16(op1, op2, vl); + return __riscv_vmsleu_vv_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16( @@ -184,7 +184,7 @@ vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m1_b16(op1, op2, vl); + return __riscv_vmsleu_vx_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8( @@ -193,7 +193,7 @@ vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsleu_vv_u16m2_b8(op1, op2, vl); + return __riscv_vmsleu_vv_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8( @@ -202,7 +202,7 @@ vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m2_b8(op1, op2, vl); + return __riscv_vmsleu_vx_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4( @@ -211,7 +211,7 @@ vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return 
vmsleu_vv_u16m4_b4(op1, op2, vl); + return __riscv_vmsleu_vv_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4( @@ -220,7 +220,7 @@ vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m4_b4(op1, op2, vl); + return __riscv_vmsleu_vx_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2( @@ -229,7 +229,7 @@ vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsleu_vv_u16m8_b2(op1, op2, vl); + return __riscv_vmsleu_vv_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2( @@ -238,7 +238,7 @@ vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m8_b2(op1, op2, vl); + return __riscv_vmsleu_vx_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64( @@ -247,7 +247,7 @@ vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsleu_vv_u32mf2_b64(op1, op2, vl); + return __riscv_vmsleu_vv_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64( @@ -256,7 +256,7 @@ vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32mf2_b64(op1, op2, vl); + return __riscv_vmsleu_vx_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32( @@ -265,7 +265,7 @@ vbool64_t 
test_vmsleu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsleu_vv_u32m1_b32(op1, op2, vl); + return __riscv_vmsleu_vv_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32( @@ -274,7 +274,7 @@ vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m1_b32(op1, op2, vl); + return __riscv_vmsleu_vx_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16( @@ -283,7 +283,7 @@ vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsleu_vv_u32m2_b16(op1, op2, vl); + return __riscv_vmsleu_vv_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16( @@ -292,7 +292,7 @@ vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m2_b16(op1, op2, vl); + return __riscv_vmsleu_vx_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8( @@ -301,7 +301,7 @@ vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsleu_vv_u32m4_b8(op1, op2, vl); + return __riscv_vmsleu_vv_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8( @@ -310,7 +310,7 @@ vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return 
vmsleu_vx_u32m4_b8(op1, op2, vl); + return __riscv_vmsleu_vx_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4( @@ -319,7 +319,7 @@ vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsleu_vv_u32m8_b4(op1, op2, vl); + return __riscv_vmsleu_vv_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4( @@ -328,7 +328,7 @@ vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m8_b4(op1, op2, vl); + return __riscv_vmsleu_vx_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64( @@ -337,7 +337,7 @@ vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsleu_vv_u64m1_b64(op1, op2, vl); + return __riscv_vmsleu_vv_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64( @@ -346,7 +346,7 @@ vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m1_b64(op1, op2, vl); + return __riscv_vmsleu_vx_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32( @@ -355,7 +355,7 @@ vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsleu_vv_u64m2_b32(op1, op2, vl); + return __riscv_vmsleu_vv_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32( @@ -364,7 +364,7 @@ vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t 
op1, vuint64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m2_b32(op1, op2, vl); + return __riscv_vmsleu_vx_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16( @@ -373,7 +373,7 @@ vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsleu_vv_u64m4_b16(op1, op2, vl); + return __riscv_vmsleu_vv_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16( @@ -382,7 +382,7 @@ vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m4_b16(op1, op2, vl); + return __riscv_vmsleu_vx_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8( @@ -391,7 +391,7 @@ vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsleu_vv_u64m8_b8(op1, op2, vl); + return __riscv_vmsleu_vv_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8( @@ -400,7 +400,7 @@ vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m8_b8(op1, op2, vl); + return __riscv_vmsleu_vx_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64_m( @@ -409,7 +409,7 @@ vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return 
vmsleu_vv_u8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_m( @@ -418,7 +418,7 @@ vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_m( @@ -427,7 +427,7 @@ vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsleu_vv_u8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_m( @@ -436,7 +436,7 @@ vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_m( @@ -445,7 +445,7 @@ vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsleu_vv_u8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_m( @@ -454,7 +454,7 @@ vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return 
vmsleu_vx_u8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_m( @@ -463,7 +463,7 @@ vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsleu_vv_u8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_m( @@ -472,7 +472,7 @@ vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_m( @@ -481,7 +481,7 @@ vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsleu_vv_u8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_m( @@ -490,7 +490,7 @@ vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_m( @@ -499,7 +499,7 @@ vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsleu_vv_u8m4_b2_m(mask, op1, op2, vl); + return 
__riscv_vmsleu_vv_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_m( @@ -508,7 +508,7 @@ vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_m( @@ -517,7 +517,7 @@ vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsleu_vv_u8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_m( @@ -526,7 +526,7 @@ vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_m( @@ -535,7 +535,7 @@ vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsleu_vv_u16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_m( @@ -544,7 +544,7 @@ vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16mf4_b64_m(mask, op1, op2, vl); + return 
__riscv_vmsleu_vx_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_m( @@ -553,7 +553,7 @@ vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsleu_vv_u16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_m( @@ -562,7 +562,7 @@ vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_m( @@ -571,7 +571,7 @@ vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsleu_vv_u16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_m( @@ -580,7 +580,7 @@ vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_m( @@ -589,7 +589,7 @@ vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsleu_vv_u16m2_b8_m(mask, op1, op2, vl); + 
return __riscv_vmsleu_vv_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_m( @@ -598,7 +598,7 @@ vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4_m( @@ -607,7 +607,7 @@ vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsleu_vv_u16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4_m( @@ -616,7 +616,7 @@ vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_m( @@ -625,7 +625,7 @@ vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsleu_vv_u16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_m( @@ -634,7 +634,7 @@ vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m8_b2_m(mask, op1, op2, vl); + return 
__riscv_vmsleu_vx_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_m( @@ -643,7 +643,7 @@ vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsleu_vv_u32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_m( @@ -652,7 +652,7 @@ vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_m( @@ -661,7 +661,7 @@ vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsleu_vv_u32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_m( @@ -670,7 +670,7 @@ vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_m( @@ -679,7 +679,7 @@ vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsleu_vv_u32m2_b16_m(mask, op1, op2, vl); 
+ return __riscv_vmsleu_vv_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_m( @@ -688,7 +688,7 @@ vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_m( @@ -697,7 +697,7 @@ vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsleu_vv_u32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_m( @@ -706,7 +706,7 @@ vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_m( @@ -715,7 +715,7 @@ vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsleu_vv_u32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_m( @@ -724,7 +724,7 @@ vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m8_b4_m(mask, op1, op2, vl); + return 
__riscv_vmsleu_vx_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_m( @@ -733,7 +733,7 @@ vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsleu_vv_u64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_m( @@ -742,7 +742,7 @@ vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_m( @@ -751,7 +751,7 @@ vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsleu_vv_u64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_m( @@ -760,7 +760,7 @@ vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_m( @@ -769,7 +769,7 @@ vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsleu_vv_u64m4_b16_m(mask, op1, op2, vl); + return 
__riscv_vmsleu_vv_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_m( @@ -778,7 +778,7 @@ vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_m( @@ -787,7 +787,7 @@ vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsleu_vv_u64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsleu_vv_u64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_m( @@ -796,6 +796,6 @@ vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsleu_vx_u64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmslt.c index 64f5414e0113..c85841fb41e1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmslt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmslt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmslt_vv_i8mf8_b64(op1, op2, vl); + return __riscv_vmslt_vv_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmslt_vv_i8mf8_b64(vint8mf8_t 
op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8mf8_b64(op1, op2, vl); + return __riscv_vmslt_vx_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmslt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmslt_vv_i8mf4_b32(op1, op2, vl); + return __riscv_vmslt_vv_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmslt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8mf4_b32(op1, op2, vl); + return __riscv_vmslt_vx_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmslt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmslt_vv_i8mf2_b16(op1, op2, vl); + return __riscv_vmslt_vv_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmslt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8mf2_b16(op1, op2, vl); + return __riscv_vmslt_vx_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmslt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmslt_vv_i8m1_b8(op1, op2, vl); + return __riscv_vmslt_vv_i8m1_b8(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmslt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m1_b8(op1, op2, vl); + return __riscv_vmslt_vx_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmslt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmslt_vv_i8m2_b4(op1, op2, vl); + return __riscv_vmslt_vv_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmslt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m2_b4(op1, op2, vl); + return __riscv_vmslt_vx_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmslt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmslt_vv_i8m4_b2(op1, op2, vl); + return __riscv_vmslt_vv_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmslt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m4_b2(op1, op2, vl); + return __riscv_vmslt_vx_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1( @@ -121,7 +121,7 @@ vbool2_t test_vmslt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmslt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmslt_vv_i8m8_b1(op1, op2, vl); + return 
__riscv_vmslt_vv_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1( @@ -130,7 +130,7 @@ vbool1_t test_vmslt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmslt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m8_b1(op1, op2, vl); + return __riscv_vmslt_vx_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64( @@ -139,7 +139,7 @@ vbool1_t test_vmslt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmslt_vv_i16mf4_b64(op1, op2, vl); + return __riscv_vmslt_vv_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64( @@ -148,7 +148,7 @@ vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16mf4_b64(op1, op2, vl); + return __riscv_vmslt_vx_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32( @@ -157,7 +157,7 @@ vbool64_t test_vmslt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmslt_vv_i16mf2_b32(op1, op2, vl); + return __riscv_vmslt_vv_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32( @@ -166,7 +166,7 @@ vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16mf2_b32(op1, op2, vl); + return __riscv_vmslt_vx_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16( @@ -175,7 +175,7 @@ vbool32_t test_vmslt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool16_t test_vmslt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmslt_vv_i16m1_b16(op1, op2, vl); + return __riscv_vmslt_vv_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16( @@ -184,7 +184,7 @@ vbool16_t test_vmslt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m1_b16(op1, op2, vl); + return __riscv_vmslt_vx_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8( @@ -193,7 +193,7 @@ vbool16_t test_vmslt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmslt_vv_i16m2_b8(op1, op2, vl); + return __riscv_vmslt_vv_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8( @@ -202,7 +202,7 @@ vbool8_t test_vmslt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m2_b8(op1, op2, vl); + return __riscv_vmslt_vx_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4( @@ -211,7 +211,7 @@ vbool8_t test_vmslt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmslt_vv_i16m4_b4(op1, op2, vl); + return __riscv_vmslt_vv_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4( @@ -220,7 +220,7 @@ vbool4_t test_vmslt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m4_b4(op1, op2, vl); + return __riscv_vmslt_vx_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2( @@ -229,7 +229,7 @@ vbool4_t 
test_vmslt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmslt_vv_i16m8_b2(op1, op2, vl); + return __riscv_vmslt_vv_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2( @@ -238,7 +238,7 @@ vbool2_t test_vmslt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m8_b2(op1, op2, vl); + return __riscv_vmslt_vx_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64( @@ -247,7 +247,7 @@ vbool2_t test_vmslt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmslt_vv_i32mf2_b64(op1, op2, vl); + return __riscv_vmslt_vv_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64( @@ -256,7 +256,7 @@ vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32mf2_b64(op1, op2, vl); + return __riscv_vmslt_vx_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32( @@ -265,7 +265,7 @@ vbool64_t test_vmslt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmslt_vv_i32m1_b32(op1, op2, vl); + return __riscv_vmslt_vv_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32( @@ -274,7 +274,7 @@ vbool32_t test_vmslt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m1_b32(op1, op2, vl); + 
return __riscv_vmslt_vx_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16( @@ -283,7 +283,7 @@ vbool32_t test_vmslt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmslt_vv_i32m2_b16(op1, op2, vl); + return __riscv_vmslt_vv_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16( @@ -292,7 +292,7 @@ vbool16_t test_vmslt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m2_b16(op1, op2, vl); + return __riscv_vmslt_vx_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8( @@ -301,7 +301,7 @@ vbool16_t test_vmslt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmslt_vv_i32m4_b8(op1, op2, vl); + return __riscv_vmslt_vv_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8( @@ -310,7 +310,7 @@ vbool8_t test_vmslt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m4_b8(op1, op2, vl); + return __riscv_vmslt_vx_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4( @@ -319,7 +319,7 @@ vbool8_t test_vmslt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmslt_vv_i32m8_b4(op1, op2, vl); + return __riscv_vmslt_vv_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4( @@ -328,7 +328,7 @@ vbool4_t test_vmslt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t 
test_vmslt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m8_b4(op1, op2, vl); + return __riscv_vmslt_vx_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64( @@ -337,7 +337,7 @@ vbool4_t test_vmslt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmslt_vv_i64m1_b64(op1, op2, vl); + return __riscv_vmslt_vv_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64( @@ -346,7 +346,7 @@ vbool64_t test_vmslt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m1_b64(op1, op2, vl); + return __riscv_vmslt_vx_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32( @@ -355,7 +355,7 @@ vbool64_t test_vmslt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmslt_vv_i64m2_b32(op1, op2, vl); + return __riscv_vmslt_vv_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32( @@ -364,7 +364,7 @@ vbool32_t test_vmslt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m2_b32(op1, op2, vl); + return __riscv_vmslt_vx_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16( @@ -373,7 +373,7 @@ vbool32_t test_vmslt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmslt_vv_i64m4_b16(op1, op2, vl); + return __riscv_vmslt_vv_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16( @@ -382,7 +382,7 @@ 
vbool16_t test_vmslt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m4_b16(op1, op2, vl); + return __riscv_vmslt_vx_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8( @@ -391,7 +391,7 @@ vbool16_t test_vmslt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmslt_vv_i64m8_b8(op1, op2, vl); + return __riscv_vmslt_vv_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8( @@ -400,7 +400,7 @@ vbool8_t test_vmslt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m8_b8(op1, op2, vl); + return __riscv_vmslt_vx_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64_m( @@ -409,7 +409,7 @@ vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmslt_vv_i8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64_m( @@ -418,7 +418,7 @@ vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32_m( @@ -427,7 +427,7 @@ vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, 
vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmslt_vv_i8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32_m( @@ -436,7 +436,7 @@ vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16_m( @@ -445,7 +445,7 @@ vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmslt_vv_i8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16_m( @@ -454,7 +454,7 @@ vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8_m( @@ -463,7 +463,7 @@ vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmslt_vv_i8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8_m( @@ -472,7 +472,7 @@ vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return 
vmslt_vx_i8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4_m( @@ -481,7 +481,7 @@ vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmslt_vv_i8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4_m( @@ -490,7 +490,7 @@ vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2_m( @@ -499,7 +499,7 @@ vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmslt_vv_i8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2_m( @@ -508,7 +508,7 @@ vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1_m( @@ -517,7 +517,7 @@ vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmslt_vv_i8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i8m8_b1_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1_m( @@ -526,7 +526,7 @@ vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64_m( @@ -535,7 +535,7 @@ vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmslt_vv_i16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64_m( @@ -544,7 +544,7 @@ vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32_m( @@ -553,7 +553,7 @@ vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmslt_vv_i16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32_m( @@ -562,7 +562,7 @@ vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i16mf2_b32_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16_m( @@ -571,7 +571,7 @@ vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmslt_vv_i16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16_m( @@ -580,7 +580,7 @@ vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8_m( @@ -589,7 +589,7 @@ vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmslt_vv_i16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8_m( @@ -598,7 +598,7 @@ vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4_m( @@ -607,7 +607,7 @@ vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmslt_vv_i16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4_m( @@ 
-616,7 +616,7 @@ vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2_m( @@ -625,7 +625,7 @@ vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmslt_vv_i16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2_m( @@ -634,7 +634,7 @@ vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_m( @@ -643,7 +643,7 @@ vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmslt_vv_i32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_m( @@ -652,7 +652,7 @@ vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32_m( @@ -661,7 +661,7 @@ vbool64_t 
test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmslt_vv_i32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32_m( @@ -670,7 +670,7 @@ vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16_m( @@ -679,7 +679,7 @@ vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmslt_vv_i32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16_m( @@ -688,7 +688,7 @@ vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8_m( @@ -697,7 +697,7 @@ vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmslt_vv_i32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8_m( @@ -706,7 +706,7 @@ vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t 
mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4_m( @@ -715,7 +715,7 @@ vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmslt_vv_i32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4_m( @@ -724,7 +724,7 @@ vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64_m( @@ -733,7 +733,7 @@ vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmslt_vv_i64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64_m( @@ -742,7 +742,7 @@ vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32_m( @@ -751,7 +751,7 @@ vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmslt_vv_i64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32_m( @@ -760,7 +760,7 @@ vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16_m( @@ -769,7 +769,7 @@ vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmslt_vv_i64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16_m( @@ -778,7 +778,7 @@ vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8_m( @@ -787,7 +787,7 @@ vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmslt_vv_i64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmslt_vv_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8_m( @@ -796,6 +796,6 @@ vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmslt_vx_i64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsltu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsltu.c index 09e2f3e635c5..b98a91d53f24 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsltu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsltu.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsltu_vv_u8mf8_b64(op1, op2, vl); + return __riscv_vmsltu_vv_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8mf8_b64(op1, op2, vl); + return __riscv_vmsltu_vx_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmsltu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsltu_vv_u8mf4_b32(op1, op2, vl); + return __riscv_vmsltu_vv_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8mf4_b32(op1, op2, vl); + return __riscv_vmsltu_vx_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16( @@ -49,7 +49,7 @@ 
vbool32_t test_vmsltu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsltu_vv_u8mf2_b16(op1, op2, vl); + return __riscv_vmsltu_vv_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8mf2_b16(op1, op2, vl); + return __riscv_vmsltu_vx_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmsltu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsltu_vv_u8m1_b8(op1, op2, vl); + return __riscv_vmsltu_vv_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmsltu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m1_b8(op1, op2, vl); + return __riscv_vmsltu_vx_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmsltu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsltu_vv_u8m2_b4(op1, op2, vl); + return __riscv_vmsltu_vv_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmsltu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m2_b4(op1, op2, vl); + return 
__riscv_vmsltu_vx_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmsltu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsltu_vv_u8m4_b2(op1, op2, vl); + return __riscv_vmsltu_vv_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmsltu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m4_b2(op1, op2, vl); + return __riscv_vmsltu_vx_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1( @@ -121,7 +121,7 @@ vbool2_t test_vmsltu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsltu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsltu_vv_u8m8_b1(op1, op2, vl); + return __riscv_vmsltu_vv_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1( @@ -130,7 +130,7 @@ vbool1_t test_vmsltu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsltu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m8_b1(op1, op2, vl); + return __riscv_vmsltu_vx_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64( @@ -139,7 +139,7 @@ vbool1_t test_vmsltu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsltu_vv_u16mf4_b64(op1, op2, vl); + return __riscv_vmsltu_vv_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64( @@ -148,7 +148,7 @@ vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t 
test_vmsltu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16mf4_b64(op1, op2, vl); + return __riscv_vmsltu_vx_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32( @@ -157,7 +157,7 @@ vbool64_t test_vmsltu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsltu_vv_u16mf2_b32(op1, op2, vl); + return __riscv_vmsltu_vv_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32( @@ -166,7 +166,7 @@ vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16mf2_b32(op1, op2, vl); + return __riscv_vmsltu_vx_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16( @@ -175,7 +175,7 @@ vbool32_t test_vmsltu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsltu_vv_u16m1_b16(op1, op2, vl); + return __riscv_vmsltu_vv_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16( @@ -184,7 +184,7 @@ vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m1_b16(op1, op2, vl); + return __riscv_vmsltu_vx_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8( @@ -193,7 +193,7 @@ vbool16_t test_vmsltu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsltu_vv_u16m2_b8(op1, op2, vl); + return __riscv_vmsltu_vv_u16m2_b8(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8( @@ -202,7 +202,7 @@ vbool8_t test_vmsltu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m2_b8(op1, op2, vl); + return __riscv_vmsltu_vx_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4( @@ -211,7 +211,7 @@ vbool8_t test_vmsltu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsltu_vv_u16m4_b4(op1, op2, vl); + return __riscv_vmsltu_vv_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4( @@ -220,7 +220,7 @@ vbool4_t test_vmsltu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m4_b4(op1, op2, vl); + return __riscv_vmsltu_vx_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2( @@ -229,7 +229,7 @@ vbool4_t test_vmsltu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsltu_vv_u16m8_b2(op1, op2, vl); + return __riscv_vmsltu_vv_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2( @@ -238,7 +238,7 @@ vbool2_t test_vmsltu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m8_b2(op1, op2, vl); + return __riscv_vmsltu_vx_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64( @@ -247,7 +247,7 @@ vbool2_t test_vmsltu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t 
test_vmsltu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsltu_vv_u32mf2_b64(op1, op2, vl); + return __riscv_vmsltu_vv_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64( @@ -256,7 +256,7 @@ vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32mf2_b64(op1, op2, vl); + return __riscv_vmsltu_vx_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32( @@ -265,7 +265,7 @@ vbool64_t test_vmsltu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsltu_vv_u32m1_b32(op1, op2, vl); + return __riscv_vmsltu_vv_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32( @@ -274,7 +274,7 @@ vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m1_b32(op1, op2, vl); + return __riscv_vmsltu_vx_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16( @@ -283,7 +283,7 @@ vbool32_t test_vmsltu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsltu_vv_u32m2_b16(op1, op2, vl); + return __riscv_vmsltu_vv_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16( @@ -292,7 +292,7 @@ vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m2_b16(op1, op2, vl); + return __riscv_vmsltu_vx_u32m2_b16(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8( @@ -301,7 +301,7 @@ vbool16_t test_vmsltu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsltu_vv_u32m4_b8(op1, op2, vl); + return __riscv_vmsltu_vv_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8( @@ -310,7 +310,7 @@ vbool8_t test_vmsltu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m4_b8(op1, op2, vl); + return __riscv_vmsltu_vx_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4( @@ -319,7 +319,7 @@ vbool8_t test_vmsltu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsltu_vv_u32m8_b4(op1, op2, vl); + return __riscv_vmsltu_vv_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4( @@ -328,7 +328,7 @@ vbool4_t test_vmsltu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m8_b4(op1, op2, vl); + return __riscv_vmsltu_vx_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64( @@ -337,7 +337,7 @@ vbool4_t test_vmsltu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsltu_vv_u64m1_b64(op1, op2, vl); + return __riscv_vmsltu_vv_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64( @@ -346,7 +346,7 @@ vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t 
test_vmsltu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m1_b64(op1, op2, vl); + return __riscv_vmsltu_vx_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32( @@ -355,7 +355,7 @@ vbool64_t test_vmsltu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsltu_vv_u64m2_b32(op1, op2, vl); + return __riscv_vmsltu_vv_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32( @@ -364,7 +364,7 @@ vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m2_b32(op1, op2, vl); + return __riscv_vmsltu_vx_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16( @@ -373,7 +373,7 @@ vbool32_t test_vmsltu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsltu_vv_u64m4_b16(op1, op2, vl); + return __riscv_vmsltu_vv_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16( @@ -382,7 +382,7 @@ vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m4_b16(op1, op2, vl); + return __riscv_vmsltu_vx_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8( @@ -391,7 +391,7 @@ vbool16_t test_vmsltu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsltu_vv_u64m8_b8(op1, op2, vl); + return __riscv_vmsltu_vv_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsltu_vx_u64m8_b8( @@ -400,7 +400,7 @@ vbool8_t test_vmsltu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m8_b8(op1, op2, vl); + return __riscv_vmsltu_vx_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64_m( @@ -409,7 +409,7 @@ vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsltu_vv_u8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64_m( @@ -418,7 +418,7 @@ vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32_m( @@ -427,7 +427,7 @@ vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsltu_vv_u8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32_m( @@ -436,7 +436,7 @@ vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16_m( @@ -445,7 +445,7 @@ 
vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsltu_vv_u8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16_m( @@ -454,7 +454,7 @@ vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8_m( @@ -463,7 +463,7 @@ vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsltu_vv_u8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8_m( @@ -472,7 +472,7 @@ vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4_m( @@ -481,7 +481,7 @@ vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsltu_vv_u8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4_m( @@ -490,7 +490,7 @@ vbool4_t 
test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2_m( @@ -499,7 +499,7 @@ vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsltu_vv_u8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2_m( @@ -508,7 +508,7 @@ vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1_m( @@ -517,7 +517,7 @@ vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsltu_vv_u8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1_m( @@ -526,7 +526,7 @@ vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64_m( @@ -535,7 +535,7 @@ vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, 
uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsltu_vv_u16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64_m( @@ -544,7 +544,7 @@ vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32_m( @@ -553,7 +553,7 @@ vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsltu_vv_u16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32_m( @@ -562,7 +562,7 @@ vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16_m( @@ -571,7 +571,7 @@ vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsltu_vv_u16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16_m( @@ -580,7 +580,7 @@ vbool16_t 
test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8_m( @@ -589,7 +589,7 @@ vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsltu_vv_u16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8_m( @@ -598,7 +598,7 @@ vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4_m( @@ -607,7 +607,7 @@ vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsltu_vv_u16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4_m( @@ -616,7 +616,7 @@ vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2_m( @@ -625,7 +625,7 @@ vbool4_t 
test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsltu_vv_u16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2_m( @@ -634,7 +634,7 @@ vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_m( @@ -643,7 +643,7 @@ vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsltu_vv_u32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_m( @@ -652,7 +652,7 @@ vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32_m( @@ -661,7 +661,7 @@ vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsltu_vv_u32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32_m( @@ -670,7 +670,7 @@ vbool32_t 
test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16_m( @@ -679,7 +679,7 @@ vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsltu_vv_u32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16_m( @@ -688,7 +688,7 @@ vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8_m( @@ -697,7 +697,7 @@ vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsltu_vv_u32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8_m( @@ -706,7 +706,7 @@ vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4_m( @@ -715,7 +715,7 @@ vbool8_t 
test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsltu_vv_u32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4_m( @@ -724,7 +724,7 @@ vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64_m( @@ -733,7 +733,7 @@ vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsltu_vv_u64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64_m( @@ -742,7 +742,7 @@ vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32_m( @@ -751,7 +751,7 @@ vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsltu_vv_u64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32_m( @@ -760,7 +760,7 @@ vbool32_t 
test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16_m( @@ -769,7 +769,7 @@ vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsltu_vv_u64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16_m( @@ -778,7 +778,7 @@ vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8_m( @@ -787,7 +787,7 @@ vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsltu_vv_u64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsltu_vv_u64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8_m( @@ -796,6 +796,6 @@ vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsltu_vx_u64m8_b8_m(mask, op1, op2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsne.c index 7112d5c2384e..25ddf5cfa481 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsne.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsne_vv_i8mf8_b64(op1, op2, vl); + return __riscv_vmsne_vv_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64( @@ -22,7 +22,7 @@ vbool64_t test_vmsne_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8mf8_b64(op1, op2, vl); + return __riscv_vmsne_vx_i8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32( @@ -31,7 +31,7 @@ vbool64_t test_vmsne_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsne_vv_i8mf4_b32(op1, op2, vl); + return __riscv_vmsne_vv_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32( @@ -40,7 +40,7 @@ vbool32_t test_vmsne_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8mf4_b32(op1, op2, vl); + return __riscv_vmsne_vx_i8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16( @@ -49,7 +49,7 @@ vbool32_t test_vmsne_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsne_vv_i8mf2_b16(op1, op2, vl); + return 
__riscv_vmsne_vv_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16( @@ -58,7 +58,7 @@ vbool16_t test_vmsne_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8mf2_b16(op1, op2, vl); + return __riscv_vmsne_vx_i8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8( @@ -67,7 +67,7 @@ vbool16_t test_vmsne_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsne_vv_i8m1_b8(op1, op2, vl); + return __riscv_vmsne_vv_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8( @@ -76,7 +76,7 @@ vbool8_t test_vmsne_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m1_b8(op1, op2, vl); + return __riscv_vmsne_vx_i8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4( @@ -85,7 +85,7 @@ vbool8_t test_vmsne_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsne_vv_i8m2_b4(op1, op2, vl); + return __riscv_vmsne_vv_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4( @@ -94,7 +94,7 @@ vbool4_t test_vmsne_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m2_b4(op1, op2, vl); + return __riscv_vmsne_vx_i8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2( @@ -103,7 +103,7 @@ vbool4_t test_vmsne_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return 
vmsne_vv_i8m4_b2(op1, op2, vl); + return __riscv_vmsne_vv_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2( @@ -112,7 +112,7 @@ vbool2_t test_vmsne_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m4_b2(op1, op2, vl); + return __riscv_vmsne_vx_i8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1( @@ -121,7 +121,7 @@ vbool2_t test_vmsne_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsne_vv_i8m8_b1(op1, op2, vl); + return __riscv_vmsne_vv_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1( @@ -130,7 +130,7 @@ vbool1_t test_vmsne_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m8_b1(op1, op2, vl); + return __riscv_vmsne_vx_i8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64( @@ -139,7 +139,7 @@ vbool1_t test_vmsne_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsne_vv_i16mf4_b64(op1, op2, vl); + return __riscv_vmsne_vv_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64( @@ -148,7 +148,7 @@ vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16mf4_b64(op1, op2, vl); + return __riscv_vmsne_vx_i16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32( @@ -157,7 +157,7 @@ vbool64_t test_vmsne_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsne_vv_i16mf2_b32(op1, op2, vl); + return __riscv_vmsne_vv_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32( @@ -166,7 +166,7 @@ vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16mf2_b32(op1, op2, vl); + return __riscv_vmsne_vx_i16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16( @@ -175,7 +175,7 @@ vbool32_t test_vmsne_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsne_vv_i16m1_b16(op1, op2, vl); + return __riscv_vmsne_vv_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16( @@ -184,7 +184,7 @@ vbool16_t test_vmsne_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m1_b16(op1, op2, vl); + return __riscv_vmsne_vx_i16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8( @@ -193,7 +193,7 @@ vbool16_t test_vmsne_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsne_vv_i16m2_b8(op1, op2, vl); + return __riscv_vmsne_vv_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8( @@ -202,7 +202,7 @@ vbool8_t test_vmsne_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m2_b8(op1, op2, vl); + return __riscv_vmsne_vx_i16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4( @@ 
-211,7 +211,7 @@ vbool8_t test_vmsne_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsne_vv_i16m4_b4(op1, op2, vl); + return __riscv_vmsne_vv_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4( @@ -220,7 +220,7 @@ vbool4_t test_vmsne_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m4_b4(op1, op2, vl); + return __riscv_vmsne_vx_i16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2( @@ -229,7 +229,7 @@ vbool4_t test_vmsne_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsne_vv_i16m8_b2(op1, op2, vl); + return __riscv_vmsne_vv_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2( @@ -238,7 +238,7 @@ vbool2_t test_vmsne_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m8_b2(op1, op2, vl); + return __riscv_vmsne_vx_i16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64( @@ -247,7 +247,7 @@ vbool2_t test_vmsne_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsne_vv_i32mf2_b64(op1, op2, vl); + return __riscv_vmsne_vv_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64( @@ -256,7 +256,7 @@ vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32mf2_b64(op1, op2, 
vl); + return __riscv_vmsne_vx_i32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32( @@ -265,7 +265,7 @@ vbool64_t test_vmsne_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsne_vv_i32m1_b32(op1, op2, vl); + return __riscv_vmsne_vv_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32( @@ -274,7 +274,7 @@ vbool32_t test_vmsne_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m1_b32(op1, op2, vl); + return __riscv_vmsne_vx_i32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16( @@ -283,7 +283,7 @@ vbool32_t test_vmsne_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsne_vv_i32m2_b16(op1, op2, vl); + return __riscv_vmsne_vv_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16( @@ -292,7 +292,7 @@ vbool16_t test_vmsne_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m2_b16(op1, op2, vl); + return __riscv_vmsne_vx_i32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8( @@ -301,7 +301,7 @@ vbool16_t test_vmsne_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsne_vv_i32m4_b8(op1, op2, vl); + return __riscv_vmsne_vv_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8( @@ -310,7 +310,7 @@ vbool8_t test_vmsne_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vbool8_t test_vmsne_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m4_b8(op1, op2, vl); + return __riscv_vmsne_vx_i32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4( @@ -319,7 +319,7 @@ vbool8_t test_vmsne_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsne_vv_i32m8_b4(op1, op2, vl); + return __riscv_vmsne_vv_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4( @@ -328,7 +328,7 @@ vbool4_t test_vmsne_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m8_b4(op1, op2, vl); + return __riscv_vmsne_vx_i32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64( @@ -337,7 +337,7 @@ vbool4_t test_vmsne_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsne_vv_i64m1_b64(op1, op2, vl); + return __riscv_vmsne_vv_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64( @@ -346,7 +346,7 @@ vbool64_t test_vmsne_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m1_b64(op1, op2, vl); + return __riscv_vmsne_vx_i64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32( @@ -355,7 +355,7 @@ vbool64_t test_vmsne_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsne_vv_i64m2_b32(op1, op2, vl); + return __riscv_vmsne_vv_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32( @@ -364,7 +364,7 
@@ vbool32_t test_vmsne_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m2_b32(op1, op2, vl); + return __riscv_vmsne_vx_i64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16( @@ -373,7 +373,7 @@ vbool32_t test_vmsne_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsne_vv_i64m4_b16(op1, op2, vl); + return __riscv_vmsne_vv_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16( @@ -382,7 +382,7 @@ vbool16_t test_vmsne_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m4_b16(op1, op2, vl); + return __riscv_vmsne_vx_i64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8( @@ -391,7 +391,7 @@ vbool16_t test_vmsne_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsne_vv_i64m8_b8(op1, op2, vl); + return __riscv_vmsne_vv_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8( @@ -400,7 +400,7 @@ vbool8_t test_vmsne_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m8_b8(op1, op2, vl); + return __riscv_vmsne_vx_i64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64( @@ -409,7 +409,7 @@ vbool8_t test_vmsne_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsne_vv_u8mf8_b64(op1, op2, vl); + 
return __riscv_vmsne_vv_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64( @@ -418,7 +418,7 @@ vbool64_t test_vmsne_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8mf8_b64(op1, op2, vl); + return __riscv_vmsne_vx_u8mf8_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32( @@ -427,7 +427,7 @@ vbool64_t test_vmsne_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsne_vv_u8mf4_b32(op1, op2, vl); + return __riscv_vmsne_vv_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32( @@ -436,7 +436,7 @@ vbool32_t test_vmsne_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8mf4_b32(op1, op2, vl); + return __riscv_vmsne_vx_u8mf4_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16( @@ -445,7 +445,7 @@ vbool32_t test_vmsne_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsne_vv_u8mf2_b16(op1, op2, vl); + return __riscv_vmsne_vv_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16( @@ -454,7 +454,7 @@ vbool16_t test_vmsne_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8mf2_b16(op1, op2, vl); + return __riscv_vmsne_vx_u8mf2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8( @@ -463,7 +463,7 @@ vbool16_t test_vmsne_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: 
ret [[TMP0]] // vbool8_t test_vmsne_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsne_vv_u8m1_b8(op1, op2, vl); + return __riscv_vmsne_vv_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8( @@ -472,7 +472,7 @@ vbool8_t test_vmsne_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m1_b8(op1, op2, vl); + return __riscv_vmsne_vx_u8m1_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4( @@ -481,7 +481,7 @@ vbool8_t test_vmsne_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsne_vv_u8m2_b4(op1, op2, vl); + return __riscv_vmsne_vv_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4( @@ -490,7 +490,7 @@ vbool4_t test_vmsne_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m2_b4(op1, op2, vl); + return __riscv_vmsne_vx_u8m2_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2( @@ -499,7 +499,7 @@ vbool4_t test_vmsne_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsne_vv_u8m4_b2(op1, op2, vl); + return __riscv_vmsne_vv_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2( @@ -508,7 +508,7 @@ vbool2_t test_vmsne_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m4_b2(op1, op2, vl); + return __riscv_vmsne_vx_u8m4_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1( @@ -517,7 +517,7 @@ vbool2_t 
test_vmsne_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsne_vv_u8m8_b1(op1, op2, vl); + return __riscv_vmsne_vv_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1( @@ -526,7 +526,7 @@ vbool1_t test_vmsne_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m8_b1(op1, op2, vl); + return __riscv_vmsne_vx_u8m8_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64( @@ -535,7 +535,7 @@ vbool1_t test_vmsne_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsne_vv_u16mf4_b64(op1, op2, vl); + return __riscv_vmsne_vv_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64( @@ -544,7 +544,7 @@ vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16mf4_b64(op1, op2, vl); + return __riscv_vmsne_vx_u16mf4_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32( @@ -553,7 +553,7 @@ vbool64_t test_vmsne_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsne_vv_u16mf2_b32(op1, op2, vl); + return __riscv_vmsne_vv_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32( @@ -562,7 +562,7 @@ vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16mf2_b32(op1, 
op2, vl); + return __riscv_vmsne_vx_u16mf2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16( @@ -571,7 +571,7 @@ vbool32_t test_vmsne_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsne_vv_u16m1_b16(op1, op2, vl); + return __riscv_vmsne_vv_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16( @@ -580,7 +580,7 @@ vbool16_t test_vmsne_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m1_b16(op1, op2, vl); + return __riscv_vmsne_vx_u16m1_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8( @@ -589,7 +589,7 @@ vbool16_t test_vmsne_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsne_vv_u16m2_b8(op1, op2, vl); + return __riscv_vmsne_vv_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8( @@ -598,7 +598,7 @@ vbool8_t test_vmsne_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m2_b8(op1, op2, vl); + return __riscv_vmsne_vx_u16m2_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4( @@ -607,7 +607,7 @@ vbool8_t test_vmsne_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsne_vv_u16m4_b4(op1, op2, vl); + return __riscv_vmsne_vv_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4( @@ -616,7 +616,7 @@ vbool4_t test_vmsne_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m4_b4(op1, op2, vl); + return __riscv_vmsne_vx_u16m4_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2( @@ -625,7 +625,7 @@ vbool4_t test_vmsne_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsne_vv_u16m8_b2(op1, op2, vl); + return __riscv_vmsne_vv_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2( @@ -634,7 +634,7 @@ vbool2_t test_vmsne_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m8_b2(op1, op2, vl); + return __riscv_vmsne_vx_u16m8_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64( @@ -643,7 +643,7 @@ vbool2_t test_vmsne_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsne_vv_u32mf2_b64(op1, op2, vl); + return __riscv_vmsne_vv_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64( @@ -652,7 +652,7 @@ vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32mf2_b64(op1, op2, vl); + return __riscv_vmsne_vx_u32mf2_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32( @@ -661,7 +661,7 @@ vbool64_t test_vmsne_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsne_vv_u32m1_b32(op1, op2, vl); + return __riscv_vmsne_vv_u32m1_b32(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32( @@ -670,7 +670,7 @@ vbool32_t test_vmsne_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m1_b32(op1, op2, vl); + return __riscv_vmsne_vx_u32m1_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16( @@ -679,7 +679,7 @@ vbool32_t test_vmsne_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsne_vv_u32m2_b16(op1, op2, vl); + return __riscv_vmsne_vv_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16( @@ -688,7 +688,7 @@ vbool16_t test_vmsne_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m2_b16(op1, op2, vl); + return __riscv_vmsne_vx_u32m2_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8( @@ -697,7 +697,7 @@ vbool16_t test_vmsne_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsne_vv_u32m4_b8(op1, op2, vl); + return __riscv_vmsne_vv_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8( @@ -706,7 +706,7 @@ vbool8_t test_vmsne_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m4_b8(op1, op2, vl); + return __riscv_vmsne_vx_u32m4_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4( @@ -715,7 +715,7 @@ vbool8_t test_vmsne_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u32m8_b4(vuint32m8_t 
op1, vuint32m8_t op2, size_t vl) { - return vmsne_vv_u32m8_b4(op1, op2, vl); + return __riscv_vmsne_vv_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4( @@ -724,7 +724,7 @@ vbool4_t test_vmsne_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m8_b4(op1, op2, vl); + return __riscv_vmsne_vx_u32m8_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64( @@ -733,7 +733,7 @@ vbool4_t test_vmsne_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsne_vv_u64m1_b64(op1, op2, vl); + return __riscv_vmsne_vv_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64( @@ -742,7 +742,7 @@ vbool64_t test_vmsne_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m1_b64(op1, op2, vl); + return __riscv_vmsne_vx_u64m1_b64(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32( @@ -751,7 +751,7 @@ vbool64_t test_vmsne_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsne_vv_u64m2_b32(op1, op2, vl); + return __riscv_vmsne_vv_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32( @@ -760,7 +760,7 @@ vbool32_t test_vmsne_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m2_b32(op1, op2, vl); + return __riscv_vmsne_vx_u64m2_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16( @@ -769,7 +769,7 @@ vbool32_t 
test_vmsne_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsne_vv_u64m4_b16(op1, op2, vl); + return __riscv_vmsne_vv_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16( @@ -778,7 +778,7 @@ vbool16_t test_vmsne_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m4_b16(op1, op2, vl); + return __riscv_vmsne_vx_u64m4_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8( @@ -787,7 +787,7 @@ vbool16_t test_vmsne_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsne_vv_u64m8_b8(op1, op2, vl); + return __riscv_vmsne_vv_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8( @@ -796,7 +796,7 @@ vbool8_t test_vmsne_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m8_b8(op1, op2, vl); + return __riscv_vmsne_vx_u64m8_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64_m( @@ -805,7 +805,7 @@ vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsne_vv_i8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64_m( @@ -814,7 +814,7 @@ vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, 
size_t vl) { - return vmsne_vx_i8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32_m( @@ -823,7 +823,7 @@ vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsne_vv_i8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32_m( @@ -832,7 +832,7 @@ vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16_m( @@ -841,7 +841,7 @@ vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsne_vv_i8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16_m( @@ -850,7 +850,7 @@ vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8_m( @@ -859,7 +859,7 @@ vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return 
vmsne_vv_i8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8_m( @@ -868,7 +868,7 @@ vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4_m( @@ -877,7 +877,7 @@ vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsne_vv_i8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4_m( @@ -886,7 +886,7 @@ vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2_m( @@ -895,7 +895,7 @@ vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsne_vv_i8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2_m( @@ -904,7 +904,7 @@ vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i8m4_b2_m(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1_m( @@ -913,7 +913,7 @@ vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsne_vv_i8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1_m( @@ -922,7 +922,7 @@ vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64_m( @@ -931,7 +931,7 @@ vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsne_vv_i16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64_m( @@ -940,7 +940,7 @@ vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32_m( @@ -949,7 +949,7 @@ vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsne_vv_i16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsne_vx_i16mf2_b32_m( @@ -958,7 +958,7 @@ vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16_m( @@ -967,7 +967,7 @@ vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsne_vv_i16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16_m( @@ -976,7 +976,7 @@ vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8_m( @@ -985,7 +985,7 @@ vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsne_vv_i16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8_m( @@ -994,7 +994,7 @@ vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4_m( @@ -1003,7 
+1003,7 @@ vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsne_vv_i16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4_m( @@ -1012,7 +1012,7 @@ vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2_m( @@ -1021,7 +1021,7 @@ vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsne_vv_i16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2_m( @@ -1030,7 +1030,7 @@ vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_m( @@ -1039,7 +1039,7 @@ vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsne_vv_i32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_m( @@ -1048,7 +1048,7 @@ vbool64_t 
test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32_m( @@ -1057,7 +1057,7 @@ vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsne_vv_i32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32_m( @@ -1066,7 +1066,7 @@ vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16_m( @@ -1075,7 +1075,7 @@ vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsne_vv_i32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16_m( @@ -1084,7 +1084,7 @@ vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8_m( @@ -1093,7 +1093,7 @@ vbool16_t 
test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsne_vv_i32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8_m( @@ -1102,7 +1102,7 @@ vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4_m( @@ -1111,7 +1111,7 @@ vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsne_vv_i32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4_m( @@ -1120,7 +1120,7 @@ vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64_m( @@ -1129,7 +1129,7 @@ vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsne_vv_i64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64_m( @@ -1138,7 +1138,7 @@ vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, 
vint64m1_t op1, vint64m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32_m( @@ -1147,7 +1147,7 @@ vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsne_vv_i64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32_m( @@ -1156,7 +1156,7 @@ vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16_m( @@ -1165,7 +1165,7 @@ vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsne_vv_i64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16_m( @@ -1174,7 +1174,7 @@ vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8_m( @@ -1183,7 +1183,7 @@ vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t 
op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsne_vv_i64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8_m( @@ -1192,7 +1192,7 @@ vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_i64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64_m( @@ -1201,7 +1201,7 @@ vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsne_vv_u8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64_m( @@ -1210,7 +1210,7 @@ vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8mf8_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u8mf8_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32_m( @@ -1219,7 +1219,7 @@ vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsne_vv_u8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32_m( @@ -1228,7 +1228,7 @@ vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t // CHECK-RV64-NEXT: 
ret [[TMP0]] // vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8mf4_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u8mf4_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16_m( @@ -1237,7 +1237,7 @@ vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsne_vv_u8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16_m( @@ -1246,7 +1246,7 @@ vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8mf2_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u8mf2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8_m( @@ -1255,7 +1255,7 @@ vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsne_vv_u8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8_m( @@ -1264,7 +1264,7 @@ vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m1_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u8m1_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4_m( @@ -1273,7 +1273,7 @@ vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t 
test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsne_vv_u8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4_m( @@ -1282,7 +1282,7 @@ vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m2_b4_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u8m2_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2_m( @@ -1291,7 +1291,7 @@ vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsne_vv_u8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2_m( @@ -1300,7 +1300,7 @@ vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m4_b2_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u8m4_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1_m( @@ -1309,7 +1309,7 @@ vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsne_vv_u8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1_m( @@ -1318,7 +1318,7 @@ vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, 
size_t vl) { - return vmsne_vx_u8m8_b1_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u8m8_b1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64_m( @@ -1327,7 +1327,7 @@ vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsne_vv_u16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64_m( @@ -1336,7 +1336,7 @@ vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16mf4_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u16mf4_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32_m( @@ -1345,7 +1345,7 @@ vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsne_vv_u16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32_m( @@ -1354,7 +1354,7 @@ vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16mf2_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u16mf2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16_m( @@ -1363,7 +1363,7 @@ vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t 
op2, size_t vl) { - return vmsne_vv_u16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16_m( @@ -1372,7 +1372,7 @@ vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m1_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u16m1_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8_m( @@ -1381,7 +1381,7 @@ vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsne_vv_u16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8_m( @@ -1390,7 +1390,7 @@ vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m2_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u16m2_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4_m( @@ -1399,7 +1399,7 @@ vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsne_vv_u16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4_m( @@ -1408,7 +1408,7 @@ vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return 
vmsne_vx_u16m4_b4_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u16m4_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2_m( @@ -1417,7 +1417,7 @@ vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsne_vv_u16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2_m( @@ -1426,7 +1426,7 @@ vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m8_b2_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u16m8_b2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_m( @@ -1435,7 +1435,7 @@ vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsne_vv_u32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_m( @@ -1444,7 +1444,7 @@ vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32mf2_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u32mf2_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32_m( @@ -1453,7 +1453,7 @@ vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return 
vmsne_vv_u32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32_m( @@ -1462,7 +1462,7 @@ vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m1_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u32m1_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16_m( @@ -1471,7 +1471,7 @@ vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsne_vv_u32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16_m( @@ -1480,7 +1480,7 @@ vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m2_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u32m2_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8_m( @@ -1489,7 +1489,7 @@ vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsne_vv_u32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8_m( @@ -1498,7 +1498,7 @@ vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return 
vmsne_vx_u32m4_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u32m4_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4_m( @@ -1507,7 +1507,7 @@ vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsne_vv_u32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4_m( @@ -1516,7 +1516,7 @@ vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m8_b4_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u32m8_b4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64_m( @@ -1525,7 +1525,7 @@ vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsne_vv_u64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64_m( @@ -1534,7 +1534,7 @@ vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m1_b64_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u64m1_b64_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32_m( @@ -1543,7 +1543,7 @@ vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsne_vv_u64m2_b32_m(mask, 
op1, op2, vl); + return __riscv_vmsne_vv_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32_m( @@ -1552,7 +1552,7 @@ vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m2_b32_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u64m2_b32_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16_m( @@ -1561,7 +1561,7 @@ vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsne_vv_u64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16_m( @@ -1570,7 +1570,7 @@ vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m4_b16_m(mask, op1, op2, vl); + return __riscv_vmsne_vx_u64m4_b16_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8_m( @@ -1579,7 +1579,7 @@ vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsne_vv_u64m8_b8_m(mask, op1, op2, vl); + return __riscv_vmsne_vv_u64m8_b8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8_m( @@ -1588,6 +1588,6 @@ vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m8_b8_m(mask, op1, op2, vl); + return 
__riscv_vmsne_vx_u64m8_b8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsof.c index 2e7aca0e1e95..bd522c9ac61a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsof.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmsof.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) { - return vmsof_m_b1(op1, vl); + return __riscv_vmsof_m_b1(op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) { - return vmsof_m_b2(op1, vl); + return __riscv_vmsof_m_b2(op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) { - return vmsof_m_b4(op1, vl); + return __riscv_vmsof_m_b4(op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) { - return vmsof_m_b8(op1, vl); + return __riscv_vmsof_m_b8(op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b16( @@ -48,7 +48,7 @@ vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) { - return vmsof_m_b16(op1, vl); + return __riscv_vmsof_m_b16(op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b32( @@ -57,7 +57,7 @@ vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) { - return vmsof_m_b32(op1, vl); + return __riscv_vmsof_m_b32(op1, vl); } // 
CHECK-RV64-LABEL: @test_vmsof_m_b64( @@ -66,7 +66,7 @@ vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) { - return vmsof_m_b64(op1, vl); + return __riscv_vmsof_m_b64(op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b1_m( @@ -75,7 +75,7 @@ vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return vmsof_m_b1_m(mask, op1, vl); + return __riscv_vmsof_m_b1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b2_m( @@ -84,7 +84,7 @@ vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return vmsof_m_b2_m(mask, op1, vl); + return __riscv_vmsof_m_b2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b4_m( @@ -93,7 +93,7 @@ vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return vmsof_m_b4_m(mask, op1, vl); + return __riscv_vmsof_m_b4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b8_m( @@ -102,7 +102,7 @@ vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return vmsof_m_b8_m(mask, op1, vl); + return __riscv_vmsof_m_b8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b16_m( @@ -111,7 +111,7 @@ vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return vmsof_m_b16_m(mask, op1, vl); + return __riscv_vmsof_m_b16_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b32_m( @@ -120,7 +120,7 @@ vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { 
// CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return vmsof_m_b32_m(mask, op1, vl); + return __riscv_vmsof_m_b32_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b64_m( @@ -129,6 +129,6 @@ vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return vmsof_m_b64_m(mask, op1, vl); + return __riscv_vmsof_m_b64_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmul.c index 1866cecfe93f..4a569b512717 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmul.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmul_vv_i8mf8(op1, op2, vl); + return __riscv_vmul_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf8(op1, op2, vl); + return __riscv_vmul_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmul_vv_i8mf4(op1, op2, vl); + return __riscv_vmul_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t 
test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf4(op1, op2, vl); + return __riscv_vmul_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmul_vv_i8mf2(op1, op2, vl); + return __riscv_vmul_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf2(op1, op2, vl); + return __riscv_vmul_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmul_vv_i8m1(op1, op2, vl); + return __riscv_vmul_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m1(op1, op2, vl); + return __riscv_vmul_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmul_vv_i8m2(op1, op2, vl); + return __riscv_vmul_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - 
return vmul_vx_i8m2(op1, op2, vl); + return __riscv_vmul_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmul_vv_i8m4(op1, op2, vl); + return __riscv_vmul_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m4(op1, op2, vl); + return __riscv_vmul_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmul_vv_i8m8(op1, op2, vl); + return __riscv_vmul_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m8(op1, op2, vl); + return __riscv_vmul_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmul_vv_i16mf4(op1, op2, vl); + return __riscv_vmul_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16mf4(op1, op2, vl); + return 
__riscv_vmul_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmul_vv_i16mf2(op1, op2, vl); + return __riscv_vmul_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16mf2(op1, op2, vl); + return __riscv_vmul_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmul_vv_i16m1(op1, op2, vl); + return __riscv_vmul_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m1(op1, op2, vl); + return __riscv_vmul_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmul_vv_i16m2(op1, op2, vl); + return __riscv_vmul_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m2(op1, op2, 
vl); + return __riscv_vmul_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmul_vv_i16m4(op1, op2, vl); + return __riscv_vmul_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m4(op1, op2, vl); + return __riscv_vmul_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmul_vv_i16m8(op1, op2, vl); + return __riscv_vmul_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m8(op1, op2, vl); + return __riscv_vmul_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmul_vv_i32mf2(op1, op2, vl); + return __riscv_vmul_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32mf2(op1, 
op2, vl); + return __riscv_vmul_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmul_vv_i32m1(op1, op2, vl); + return __riscv_vmul_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m1(op1, op2, vl); + return __riscv_vmul_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmul_vv_i32m2(op1, op2, vl); + return __riscv_vmul_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m2(op1, op2, vl); + return __riscv_vmul_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmul_vv_i32m4(op1, op2, vl); + return __riscv_vmul_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m4(op1, op2, vl); 
+ return __riscv_vmul_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmul_vv_i32m8(op1, op2, vl); + return __riscv_vmul_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m8(op1, op2, vl); + return __riscv_vmul_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmul_vv_i64m1(op1, op2, vl); + return __riscv_vmul_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m1(op1, op2, vl); + return __riscv_vmul_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmul_vv_i64m2(op1, op2, vl); + return __riscv_vmul_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m2(op1, op2, vl); + return 
__riscv_vmul_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmul_vv_i64m4(op1, op2, vl); + return __riscv_vmul_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m4(op1, op2, vl); + return __riscv_vmul_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmul_vv_i64m8(op1, op2, vl); + return __riscv_vmul_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m8(op1, op2, vl); + return __riscv_vmul_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8( @@ -408,7 +408,7 @@ vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmul_vv_u8mf8(op1, op2, vl); + return __riscv_vmul_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8( @@ -417,7 +417,7 @@ vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf8(op1, op2, vl); + return 
__riscv_vmul_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4( @@ -426,7 +426,7 @@ vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmul_vv_u8mf4(op1, op2, vl); + return __riscv_vmul_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4( @@ -435,7 +435,7 @@ vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf4(op1, op2, vl); + return __riscv_vmul_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2( @@ -444,7 +444,7 @@ vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmul_vv_u8mf2(op1, op2, vl); + return __riscv_vmul_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2( @@ -453,7 +453,7 @@ vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf2(op1, op2, vl); + return __riscv_vmul_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m1( @@ -462,7 +462,7 @@ vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmul_vv_u8m1(op1, op2, vl); + return __riscv_vmul_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m1( @@ -471,7 +471,7 @@ vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m1(op1, op2, vl); + return 
__riscv_vmul_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m2( @@ -480,7 +480,7 @@ vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmul_vv_u8m2(op1, op2, vl); + return __riscv_vmul_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m2( @@ -489,7 +489,7 @@ vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m2(op1, op2, vl); + return __riscv_vmul_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m4( @@ -498,7 +498,7 @@ vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmul_vv_u8m4(op1, op2, vl); + return __riscv_vmul_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m4( @@ -507,7 +507,7 @@ vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m4(op1, op2, vl); + return __riscv_vmul_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m8( @@ -516,7 +516,7 @@ vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmul_vv_u8m8(op1, op2, vl); + return __riscv_vmul_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m8( @@ -525,7 +525,7 @@ vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m8(op1, op2, vl); + return __riscv_vmul_vx_u8m8(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmul_vv_u16mf4( @@ -534,7 +534,7 @@ vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmul_vv_u16mf4(op1, op2, vl); + return __riscv_vmul_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4( @@ -543,7 +543,7 @@ vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16mf4(op1, op2, vl); + return __riscv_vmul_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2( @@ -552,7 +552,7 @@ vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmul_vv_u16mf2(op1, op2, vl); + return __riscv_vmul_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2( @@ -561,7 +561,7 @@ vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16mf2(op1, op2, vl); + return __riscv_vmul_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m1( @@ -570,7 +570,7 @@ vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmul_vv_u16m1(op1, op2, vl); + return __riscv_vmul_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m1( @@ -579,7 +579,7 @@ vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m1(op1, op2, 
vl); + return __riscv_vmul_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m2( @@ -588,7 +588,7 @@ vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmul_vv_u16m2(op1, op2, vl); + return __riscv_vmul_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m2( @@ -597,7 +597,7 @@ vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m2(op1, op2, vl); + return __riscv_vmul_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m4( @@ -606,7 +606,7 @@ vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmul_vv_u16m4(op1, op2, vl); + return __riscv_vmul_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m4( @@ -615,7 +615,7 @@ vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m4(op1, op2, vl); + return __riscv_vmul_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m8( @@ -624,7 +624,7 @@ vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmul_vv_u16m8(op1, op2, vl); + return __riscv_vmul_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m8( @@ -633,7 +633,7 @@ vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return 
vmul_vx_u16m8(op1, op2, vl); + return __riscv_vmul_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2( @@ -642,7 +642,7 @@ vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmul_vv_u32mf2(op1, op2, vl); + return __riscv_vmul_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2( @@ -651,7 +651,7 @@ vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32mf2(op1, op2, vl); + return __riscv_vmul_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m1( @@ -660,7 +660,7 @@ vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmul_vv_u32m1(op1, op2, vl); + return __riscv_vmul_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m1( @@ -669,7 +669,7 @@ vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m1(op1, op2, vl); + return __riscv_vmul_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m2( @@ -678,7 +678,7 @@ vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmul_vv_u32m2(op1, op2, vl); + return __riscv_vmul_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m2( @@ -687,7 +687,7 @@ vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t 
op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m2(op1, op2, vl); + return __riscv_vmul_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m4( @@ -696,7 +696,7 @@ vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmul_vv_u32m4(op1, op2, vl); + return __riscv_vmul_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m4( @@ -705,7 +705,7 @@ vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m4(op1, op2, vl); + return __riscv_vmul_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m8( @@ -714,7 +714,7 @@ vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmul_vv_u32m8(op1, op2, vl); + return __riscv_vmul_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m8( @@ -723,7 +723,7 @@ vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m8(op1, op2, vl); + return __riscv_vmul_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m1( @@ -732,7 +732,7 @@ vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmul_vv_u64m1(op1, op2, vl); + return __riscv_vmul_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m1( @@ -741,7 +741,7 @@ vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m1(op1, op2, vl); + return __riscv_vmul_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m2( @@ -750,7 +750,7 @@ vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmul_vv_u64m2(op1, op2, vl); + return __riscv_vmul_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m2( @@ -759,7 +759,7 @@ vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m2(op1, op2, vl); + return __riscv_vmul_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m4( @@ -768,7 +768,7 @@ vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmul_vv_u64m4(op1, op2, vl); + return __riscv_vmul_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m4( @@ -777,7 +777,7 @@ vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m4(op1, op2, vl); + return __riscv_vmul_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m8( @@ -786,7 +786,7 @@ vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmul_vv_u64m8(op1, op2, vl); + return __riscv_vmul_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m8( @@ -795,7 +795,7 @@ vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m8(op1, op2, vl); + return __riscv_vmul_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m( @@ -804,7 +804,7 @@ vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmul_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m( @@ -813,7 +813,7 @@ vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m( @@ -822,7 +822,7 @@ vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmul_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m( @@ -831,7 +831,7 @@ vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m( @@ -840,7 +840,7 @@ vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmul_vv_i8mf2_m(mask, op1, op2, vl); + return 
__riscv_vmul_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m( @@ -849,7 +849,7 @@ vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m( @@ -858,7 +858,7 @@ vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmul_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m( @@ -867,7 +867,7 @@ vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m( @@ -876,7 +876,7 @@ vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmul_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m( @@ -885,7 +885,7 @@ vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m( @@ -894,7 +894,7 @@ vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, 
vint8m2_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmul_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m( @@ -903,7 +903,7 @@ vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m( @@ -912,7 +912,7 @@ vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmul_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m( @@ -921,7 +921,7 @@ vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m( @@ -930,7 +930,7 @@ vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmul_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m( @@ -939,7 +939,7 @@ vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t 
op2, size_t vl) { - return vmul_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m( @@ -948,7 +948,7 @@ vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmul_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m( @@ -957,7 +957,7 @@ vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m( @@ -966,7 +966,7 @@ vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmul_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m( @@ -975,7 +975,7 @@ vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m( @@ -984,7 +984,7 @@ vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmul_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i16m2_m(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m( @@ -993,7 +993,7 @@ vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m( @@ -1002,7 +1002,7 @@ vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmul_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m( @@ -1011,7 +1011,7 @@ vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m( @@ -1020,7 +1020,7 @@ vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmul_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m( @@ -1029,7 +1029,7 @@ vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m( @@ -1038,7 +1038,7 @@ vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, 
vint16m8_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmul_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmul_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m( @@ -1065,7 +1065,7 @@ vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m( @@ -1074,7 +1074,7 @@ vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmul_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m( @@ -1083,7 +1083,7 @@ vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m( @@ -1092,7 +1092,7 @@ vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmul_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m( @@ -1101,7 +1101,7 @@ vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m( @@ -1110,7 +1110,7 @@ vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmul_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m( @@ -1119,7 +1119,7 @@ vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m( @@ -1128,7 +1128,7 @@ vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmul_vv_i64m1_m(mask, op1, op2, 
vl); + return __riscv_vmul_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m( @@ -1137,7 +1137,7 @@ vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m( @@ -1146,7 +1146,7 @@ vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmul_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m( @@ -1155,7 +1155,7 @@ vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m( @@ -1164,7 +1164,7 @@ vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmul_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m( @@ -1173,7 +1173,7 @@ vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m( @@ -1182,7 
+1182,7 @@ vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmul_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vmul_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m( @@ -1191,7 +1191,7 @@ vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vmul_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m( @@ -1200,7 +1200,7 @@ vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmul_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmul_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmul_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmul_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) 
{ - return vmul_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmul_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmul_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmul_vv_u16mf4_m( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmul_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmul_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmul_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m( @@ -1371,7 +1371,7 @@ vuint16m1_t 
test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmul_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmul_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmul_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmul_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmul_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t 
vl) { - return vmul_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmul_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmul_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmul_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u32m8_m(mask, op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmul_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmul_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m( @@ -1560,7 +1560,7 @@ vuint64m2_t 
test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmul_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmul_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vmul_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vmul_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulh.c index 517507b4fb54..b00cc5f399c5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulh.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulh.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return 
vmulh_vv_i8mf8(op1, op2, vl); + return __riscv_vmulh_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf8(op1, op2, vl); + return __riscv_vmulh_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmulh_vv_i8mf4(op1, op2, vl); + return __riscv_vmulh_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf4(op1, op2, vl); + return __riscv_vmulh_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmulh_vv_i8mf2(op1, op2, vl); + return __riscv_vmulh_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf2(op1, op2, vl); + return __riscv_vmulh_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return 
vmulh_vv_i8m1(op1, op2, vl); + return __riscv_vmulh_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m1(op1, op2, vl); + return __riscv_vmulh_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmulh_vv_i8m2(op1, op2, vl); + return __riscv_vmulh_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m2(op1, op2, vl); + return __riscv_vmulh_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmulh_vv_i8m4(op1, op2, vl); + return __riscv_vmulh_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m4(op1, op2, vl); + return __riscv_vmulh_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmulh_vv_i8m8(op1, op2, vl); + return 
__riscv_vmulh_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m8(op1, op2, vl); + return __riscv_vmulh_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmulh_vv_i16mf4(op1, op2, vl); + return __riscv_vmulh_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf4(op1, op2, vl); + return __riscv_vmulh_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmulh_vv_i16mf2(op1, op2, vl); + return __riscv_vmulh_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf2(op1, op2, vl); + return __riscv_vmulh_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - 
return vmulh_vv_i16m1(op1, op2, vl); + return __riscv_vmulh_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m1(op1, op2, vl); + return __riscv_vmulh_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmulh_vv_i16m2(op1, op2, vl); + return __riscv_vmulh_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m2(op1, op2, vl); + return __riscv_vmulh_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmulh_vv_i16m4(op1, op2, vl); + return __riscv_vmulh_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m4(op1, op2, vl); + return __riscv_vmulh_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t 
op2, size_t vl) { - return vmulh_vv_i16m8(op1, op2, vl); + return __riscv_vmulh_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m8(op1, op2, vl); + return __riscv_vmulh_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmulh_vv_i32mf2(op1, op2, vl); + return __riscv_vmulh_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32mf2(op1, op2, vl); + return __riscv_vmulh_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmulh_vv_i32m1(op1, op2, vl); + return __riscv_vmulh_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m1(op1, op2, vl); + return __riscv_vmulh_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmulh_vv_i32m2(op1, op2, vl); + return __riscv_vmulh_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m2(op1, op2, vl); + return __riscv_vmulh_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmulh_vv_i32m4(op1, op2, vl); + return __riscv_vmulh_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m4(op1, op2, vl); + return __riscv_vmulh_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmulh_vv_i32m8(op1, op2, vl); + return __riscv_vmulh_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m8(op1, op2, vl); + return __riscv_vmulh_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmulh_vv_i64m1(op1, op2, vl); + return __riscv_vmulh_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m1(op1, op2, vl); + return __riscv_vmulh_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmulh_vv_i64m2(op1, op2, vl); + return __riscv_vmulh_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m2(op1, op2, vl); + return __riscv_vmulh_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmulh_vv_i64m4(op1, op2, vl); + return __riscv_vmulh_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m4(op1, op2, vl); + return __riscv_vmulh_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh_vv_i64m8(op1, op2, vl); + return __riscv_vmulh_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m8(op1, op2, vl); + return __riscv_vmulh_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m( @@ -408,7 +408,7 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmulh_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m( @@ -426,7 +426,7 @@ vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmulh_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m( @@ -435,7 +435,7 @@ vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf4_m(mask, op1, op2, vl); + return 
__riscv_vmulh_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m( @@ -444,7 +444,7 @@ vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmulh_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m( @@ -453,7 +453,7 @@ vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m( @@ -462,7 +462,7 @@ vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmulh_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m( @@ -480,7 +480,7 @@ vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmulh_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m( @@ -489,7 +489,7 @@ vint8m2_t 
test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m( @@ -498,7 +498,7 @@ vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmulh_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m( @@ -507,7 +507,7 @@ vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m( @@ -516,7 +516,7 @@ vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmulh_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m( @@ -525,7 +525,7 @@ vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m( @@ -534,7 +534,7 @@ vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmulh_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m( @@ -543,7 +543,7 @@ vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m( @@ -552,7 +552,7 @@ vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmulh_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m( @@ -561,7 +561,7 @@ vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m( @@ -570,7 +570,7 @@ vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmulh_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m( @@ -579,7 +579,7 @@ vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - 
return vmulh_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m( @@ -588,7 +588,7 @@ vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmulh_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m( @@ -597,7 +597,7 @@ vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m( @@ -606,7 +606,7 @@ vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmulh_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m( @@ -615,7 +615,7 @@ vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m( @@ -624,7 +624,7 @@ vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmulh_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i16m8_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m( @@ -633,7 +633,7 @@ vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m( @@ -642,7 +642,7 @@ vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmulh_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m( @@ -651,7 +651,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m( @@ -660,7 +660,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmulh_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m( @@ -669,7 +669,7 @@ vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m( @@ -678,7 +678,7 @@ vint32m1_t 
test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmulh_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m( @@ -687,7 +687,7 @@ vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m( @@ -696,7 +696,7 @@ vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmulh_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m( @@ -705,7 +705,7 @@ vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m( @@ -714,7 +714,7 @@ vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmulh_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m( @@ -723,7 +723,7 @@ vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m( @@ -732,7 +732,7 @@ vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmulh_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m( @@ -741,7 +741,7 @@ vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m( @@ -750,7 +750,7 @@ vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmulh_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m( @@ -759,7 +759,7 @@ vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m( @@ -768,7 +768,7 @@ vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return 
vmulh_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m( @@ -777,7 +777,7 @@ vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m( @@ -786,7 +786,7 @@ vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m( @@ -795,6 +795,6 @@ vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhsu.c index 41a9df19eb82..8a270612011d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhsu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu_vv_i8mf8(op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, 
vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf8(op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu_vv_i8mf4(op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf4(op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu_vv_i8mf2(op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf2(op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu_vv_i8m1(op1, op2, vl); + return __riscv_vmulhsu_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1( @@ 
-75,7 +75,7 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m1(op1, op2, vl); + return __riscv_vmulhsu_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu_vv_i8m2(op1, op2, vl); + return __riscv_vmulhsu_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m2(op1, op2, vl); + return __riscv_vmulhsu_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu_vv_i8m4(op1, op2, vl); + return __riscv_vmulhsu_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m4(op1, op2, vl); + return __riscv_vmulhsu_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhsu_vv_i8m8(op1, op2, vl); + return __riscv_vmulhsu_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmulhsu_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m8(op1, op2, vl); + return __riscv_vmulhsu_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu_vv_i16mf4(op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf4(op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu_vv_i16mf2(op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf2(op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { - 
return vmulhsu_vv_i16m1(op1, op2, vl); + return __riscv_vmulhsu_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m1(op1, op2, vl); + return __riscv_vmulhsu_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu_vv_i16m2(op1, op2, vl); + return __riscv_vmulhsu_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m2(op1, op2, vl); + return __riscv_vmulhsu_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu_vv_i16m4(op1, op2, vl); + return __riscv_vmulhsu_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m4(op1, op2, vl); + return __riscv_vmulhsu_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhsu_vv_i16m8(op1, op2, vl); + return __riscv_vmulhsu_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m8(op1, op2, vl); + return __riscv_vmulhsu_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhsu_vv_i32mf2(op1, op2, vl); + return __riscv_vmulhsu_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32mf2(op1, op2, vl); + return __riscv_vmulhsu_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhsu_vv_i32m1(op1, op2, vl); + return __riscv_vmulhsu_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m1(op1, op2, vl); + return __riscv_vmulhsu_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2( @@ -282,7 
+282,7 @@ vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhsu_vv_i32m2(op1, op2, vl); + return __riscv_vmulhsu_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m2(op1, op2, vl); + return __riscv_vmulhsu_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhsu_vv_i32m4(op1, op2, vl); + return __riscv_vmulhsu_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m4(op1, op2, vl); + return __riscv_vmulhsu_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhsu_vv_i32m8(op1, op2, vl); + return __riscv_vmulhsu_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m8(op1, op2, vl); + return 
__riscv_vmulhsu_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhsu_vv_i64m1(op1, op2, vl); + return __riscv_vmulhsu_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m1(op1, op2, vl); + return __riscv_vmulhsu_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhsu_vv_i64m2(op1, op2, vl); + return __riscv_vmulhsu_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m2(op1, op2, vl); + return __riscv_vmulhsu_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhsu_vv_i64m4(op1, op2, vl); + return __riscv_vmulhsu_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m4(op1, op2, vl); + return __riscv_vmulhsu_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhsu_vv_i64m8(op1, op2, vl); + return __riscv_vmulhsu_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m8(op1, op2, vl); + return __riscv_vmulhsu_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m( @@ -408,7 +408,7 @@ vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m( @@ -426,7 +426,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf4_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m( @@ -435,7 +435,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m( @@ -444,7 +444,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m( @@ -453,7 +453,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m( @@ -462,7 +462,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m( @@ -480,7 +480,7 @@ 
vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m( @@ -489,7 +489,7 @@ vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m( @@ -498,7 +498,7 @@ vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m( @@ -507,7 +507,7 @@ vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m( @@ -516,7 +516,7 @@ vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhsu_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m( @@ -525,7 +525,7 @@ vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, s // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m( @@ -534,7 +534,7 @@ vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m( @@ -543,7 +543,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m( @@ -552,7 +552,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m( @@ -561,7 +561,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m( @@ -570,7 +570,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhsu_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m( @@ -579,7 +579,7 @@ vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m( @@ -588,7 +588,7 @@ vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m( @@ -597,7 +597,7 @@ vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m( @@ -606,7 +606,7 @@ vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m( @@ -615,7 +615,7 @@ vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t 
op2, size_t vl) { - return vmulhsu_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m( @@ -624,7 +624,7 @@ vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhsu_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m( @@ -633,7 +633,7 @@ vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m( @@ -642,7 +642,7 @@ vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhsu_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m( @@ -651,7 +651,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m( @@ -660,7 +660,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhsu_vv_i32m1_m(mask, 
op1, op2, vl); + return __riscv_vmulhsu_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m( @@ -669,7 +669,7 @@ vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m( @@ -678,7 +678,7 @@ vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhsu_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m( @@ -687,7 +687,7 @@ vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m( @@ -696,7 +696,7 @@ vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhsu_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m( @@ -705,7 +705,7 @@ vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m4_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m( @@ -714,7 +714,7 @@ vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhsu_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m( @@ -723,7 +723,7 @@ vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m( @@ -732,7 +732,7 @@ vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhsu_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m( @@ -741,7 +741,7 @@ vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m( @@ -750,7 +750,7 @@ vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhsu_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m( @@ 
-759,7 +759,7 @@ vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m( @@ -768,7 +768,7 @@ vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhsu_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m( @@ -777,7 +777,7 @@ vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m( @@ -786,7 +786,7 @@ vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhsu_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m( @@ -795,6 +795,6 @@ vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhu.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhu.c index 0bc234775bd8..9e5894856371 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmulhu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu_vv_u8mf8(op1, op2, vl); + return __riscv_vmulhu_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf8(op1, op2, vl); + return __riscv_vmulhu_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu_vv_u8mf4(op1, op2, vl); + return __riscv_vmulhu_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4( @@ -39,7 +39,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf4(op1, op2, vl); + return __riscv_vmulhu_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2( @@ -48,7 +48,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu_vv_u8mf2(op1, op2, vl); + return __riscv_vmulhu_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t 
test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf2(op1, op2, vl); + return __riscv_vmulhu_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu_vv_u8m1(op1, op2, vl); + return __riscv_vmulhu_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m1(op1, op2, vl); + return __riscv_vmulhu_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2( @@ -84,7 +84,7 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu_vv_u8m2(op1, op2, vl); + return __riscv_vmulhu_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m2(op1, op2, vl); + return __riscv_vmulhu_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu_vv_u8m4(op1, op2, vl); + return __riscv_vmulhu_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4( @@ -111,7 
+111,7 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m4(op1, op2, vl); + return __riscv_vmulhu_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8( @@ -120,7 +120,7 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhu_vv_u8m8(op1, op2, vl); + return __riscv_vmulhu_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8( @@ -129,7 +129,7 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m8(op1, op2, vl); + return __riscv_vmulhu_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4( @@ -138,7 +138,7 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu_vv_u16mf4(op1, op2, vl); + return __riscv_vmulhu_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4( @@ -147,7 +147,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf4(op1, op2, vl); + return __riscv_vmulhu_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2( @@ -156,7 +156,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhu_vv_u16mf2(op1, op2, vl); + return 
__riscv_vmulhu_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2( @@ -165,7 +165,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf2(op1, op2, vl); + return __riscv_vmulhu_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1( @@ -174,7 +174,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu_vv_u16m1(op1, op2, vl); + return __riscv_vmulhu_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1( @@ -183,7 +183,7 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m1(op1, op2, vl); + return __riscv_vmulhu_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2( @@ -192,7 +192,7 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu_vv_u16m2(op1, op2, vl); + return __riscv_vmulhu_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2( @@ -201,7 +201,7 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m2(op1, op2, vl); + return __riscv_vmulhu_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4( @@ -210,7 +210,7 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhu_vv_u16m4(op1, op2, vl); + return __riscv_vmulhu_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4( @@ -219,7 +219,7 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m4(op1, op2, vl); + return __riscv_vmulhu_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8( @@ -228,7 +228,7 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu_vv_u16m8(op1, op2, vl); + return __riscv_vmulhu_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8( @@ -237,7 +237,7 @@ vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m8(op1, op2, vl); + return __riscv_vmulhu_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2( @@ -246,7 +246,7 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu_vv_u32mf2(op1, op2, vl); + return __riscv_vmulhu_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32mf2(op1, op2, vl); + return __riscv_vmulhu_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t 
test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu_vv_u32m1(op1, op2, vl); + return __riscv_vmulhu_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m1(op1, op2, vl); + return __riscv_vmulhu_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2( @@ -282,7 +282,7 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu_vv_u32m2(op1, op2, vl); + return __riscv_vmulhu_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2( @@ -291,7 +291,7 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m2(op1, op2, vl); + return __riscv_vmulhu_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4( @@ -300,7 +300,7 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu_vv_u32m4(op1, op2, vl); + return __riscv_vmulhu_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4( @@ -309,7 +309,7 @@ vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m4(op1, op2, vl); + return __riscv_vmulhu_vx_u32m4(op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8( @@ -318,7 +318,7 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu_vv_u32m8(op1, op2, vl); + return __riscv_vmulhu_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8( @@ -327,7 +327,7 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m8(op1, op2, vl); + return __riscv_vmulhu_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1( @@ -336,7 +336,7 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu_vv_u64m1(op1, op2, vl); + return __riscv_vmulhu_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1( @@ -345,7 +345,7 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m1(op1, op2, vl); + return __riscv_vmulhu_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2( @@ -354,7 +354,7 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu_vv_u64m2(op1, op2, vl); + return __riscv_vmulhu_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2( @@ -363,7 +363,7 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t 
vl) { - return vmulhu_vx_u64m2(op1, op2, vl); + return __riscv_vmulhu_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4( @@ -372,7 +372,7 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu_vv_u64m4(op1, op2, vl); + return __riscv_vmulhu_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4( @@ -381,7 +381,7 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m4(op1, op2, vl); + return __riscv_vmulhu_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8( @@ -390,7 +390,7 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu_vv_u64m8(op1, op2, vl); + return __riscv_vmulhu_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m8(op1, op2, vl); + return __riscv_vmulhu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m( @@ -417,7 +417,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t 
op1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m( @@ -426,7 +426,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m( @@ -435,7 +435,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m( @@ -444,7 +444,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m( @@ -453,7 +453,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m( @@ -462,7 +462,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m( @@ -471,7 +471,7 @@ vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m( @@ -480,7 +480,7 @@ vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m( @@ -489,7 +489,7 @@ vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m( @@ -498,7 +498,7 @@ vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m( @@ -507,7 +507,7 @@ vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m4_m(mask, 
op1, op2, vl); + return __riscv_vmulhu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m( @@ -516,7 +516,7 @@ vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhu_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m( @@ -525,7 +525,7 @@ vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m( @@ -534,7 +534,7 @@ vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m( @@ -543,7 +543,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m( @@ -552,7 +552,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u16mf2_m(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m( @@ -561,7 +561,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m( @@ -570,7 +570,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m( @@ -579,7 +579,7 @@ vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m( @@ -588,7 +588,7 @@ vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m( @@ -597,7 +597,7 @@ vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m( @@ -606,7 +606,7 
@@ vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m( @@ -615,7 +615,7 @@ vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m( @@ -624,7 +624,7 @@ vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m( @@ -633,7 +633,7 @@ vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m( @@ -642,7 +642,7 @@ vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m( @@ -651,7 +651,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, 
vuint32mf2_t op1, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m( @@ -660,7 +660,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m( @@ -669,7 +669,7 @@ vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m( @@ -678,7 +678,7 @@ vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m( @@ -687,7 +687,7 @@ vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m( @@ -696,7 +696,7 @@ vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m( @@ -705,7 +705,7 @@ vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m( @@ -714,7 +714,7 @@ vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m( @@ -723,7 +723,7 @@ vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m( @@ -732,7 +732,7 @@ vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m( @@ -741,7 +741,7 @@ vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, 
uint64_t op2, size_t vl) { - return vmulhu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m( @@ -750,7 +750,7 @@ vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m( @@ -759,7 +759,7 @@ vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m( @@ -768,7 +768,7 @@ vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m( @@ -777,7 +777,7 @@ vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m( @@ -786,7 +786,7 @@ vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu_vv_u64m8_m(mask, op1, op2, 
vl); + return __riscv_vmulhu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c index 2e4dc657420a..5364bd0630cc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) { - return vmv_v_v_i8mf8(src, vl); + return __riscv_vmv_v_v_i8mf8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf8( @@ -22,7 +22,7 @@ vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmv_v_x_i8mf8(int8_t src, size_t vl) { - return vmv_v_x_i8mf8(src, vl); + return __riscv_vmv_v_x_i8mf8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4( @@ -31,7 +31,7 @@ vint8mf8_t test_vmv_v_x_i8mf8(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) { - return vmv_v_v_i8mf4(src, vl); + return __riscv_vmv_v_v_i8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf4( @@ -40,7 +40,7 @@ vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmv_v_x_i8mf4(int8_t src, size_t vl) { - return vmv_v_x_i8mf4(src, vl); + return __riscv_vmv_v_x_i8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2( @@ -49,7 +49,7 @@ vint8mf4_t 
test_vmv_v_x_i8mf4(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) { - return vmv_v_v_i8mf2(src, vl); + return __riscv_vmv_v_v_i8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf2( @@ -58,7 +58,7 @@ vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmv_v_x_i8mf2(int8_t src, size_t vl) { - return vmv_v_x_i8mf2(src, vl); + return __riscv_vmv_v_x_i8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i8m1( @@ -67,7 +67,7 @@ vint8mf2_t test_vmv_v_x_i8mf2(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) { - return vmv_v_v_i8m1(src, vl); + return __riscv_vmv_v_v_i8m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8m1( @@ -76,7 +76,7 @@ vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmv_v_x_i8m1(int8_t src, size_t vl) { - return vmv_v_x_i8m1(src, vl); + return __riscv_vmv_v_x_i8m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i8m2( @@ -85,7 +85,7 @@ vint8m1_t test_vmv_v_x_i8m1(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) { - return vmv_v_v_i8m2(src, vl); + return __riscv_vmv_v_v_i8m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8m2( @@ -94,7 +94,7 @@ vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmv_v_x_i8m2(int8_t src, size_t vl) { - return vmv_v_x_i8m2(src, vl); + return __riscv_vmv_v_x_i8m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i8m4( @@ -103,7 +103,7 @@ vint8m2_t test_vmv_v_x_i8m2(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) { - return vmv_v_v_i8m4(src, vl); + return __riscv_vmv_v_v_i8m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8m4( @@ -112,7 +112,7 @@ vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmv_v_x_i8m4(int8_t src, size_t vl) { - return vmv_v_x_i8m4(src, vl); + return __riscv_vmv_v_x_i8m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i8m8( @@ -121,7 +121,7 @@ vint8m4_t test_vmv_v_x_i8m4(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) { - return vmv_v_v_i8m8(src, vl); + return __riscv_vmv_v_v_i8m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8m8( @@ -130,7 +130,7 @@ vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmv_v_x_i8m8(int8_t src, size_t vl) { - return vmv_v_x_i8m8(src, vl); + return __riscv_vmv_v_x_i8m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4( @@ -139,7 +139,7 @@ vint8m8_t test_vmv_v_x_i8m8(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) { - return vmv_v_v_i16mf4(src, vl); + return __riscv_vmv_v_v_i16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i16mf4( @@ -148,7 +148,7 @@ vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmv_v_x_i16mf4(int16_t src, size_t vl) { - return vmv_v_x_i16mf4(src, vl); + return __riscv_vmv_v_x_i16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2( @@ -157,7 +157,7 @@ vint16mf4_t test_vmv_v_x_i16mf4(int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) { - return vmv_v_v_i16mf2(src, vl); + return __riscv_vmv_v_v_i16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i16mf2( @@ -166,7 +166,7 @@ vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmv_v_x_i16mf2(int16_t src, size_t vl) { - return vmv_v_x_i16mf2(src, vl); + return __riscv_vmv_v_x_i16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i16m1( @@ -175,7 +175,7 @@ vint16mf2_t 
test_vmv_v_x_i16mf2(int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) { - return vmv_v_v_i16m1(src, vl); + return __riscv_vmv_v_v_i16m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i16m1( @@ -184,7 +184,7 @@ vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmv_v_x_i16m1(int16_t src, size_t vl) { - return vmv_v_x_i16m1(src, vl); + return __riscv_vmv_v_x_i16m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i16m2( @@ -193,7 +193,7 @@ vint16m1_t test_vmv_v_x_i16m1(int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) { - return vmv_v_v_i16m2(src, vl); + return __riscv_vmv_v_v_i16m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i16m2( @@ -202,7 +202,7 @@ vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmv_v_x_i16m2(int16_t src, size_t vl) { - return vmv_v_x_i16m2(src, vl); + return __riscv_vmv_v_x_i16m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i16m4( @@ -211,7 +211,7 @@ vint16m2_t test_vmv_v_x_i16m2(int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) { - return vmv_v_v_i16m4(src, vl); + return __riscv_vmv_v_v_i16m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i16m4( @@ -220,7 +220,7 @@ vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmv_v_x_i16m4(int16_t src, size_t vl) { - return vmv_v_x_i16m4(src, vl); + return __riscv_vmv_v_x_i16m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i16m8( @@ -229,7 +229,7 @@ vint16m4_t test_vmv_v_x_i16m4(int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) { - return vmv_v_v_i16m8(src, vl); + return __riscv_vmv_v_v_i16m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i16m8( @@ 
-238,7 +238,7 @@ vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmv_v_x_i16m8(int16_t src, size_t vl) { - return vmv_v_x_i16m8(src, vl); + return __riscv_vmv_v_x_i16m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2( @@ -247,7 +247,7 @@ vint16m8_t test_vmv_v_x_i16m8(int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) { - return vmv_v_v_i32mf2(src, vl); + return __riscv_vmv_v_v_i32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2( @@ -256,7 +256,7 @@ vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmv_v_x_i32mf2(int32_t src, size_t vl) { - return vmv_v_x_i32mf2(src, vl); + return __riscv_vmv_v_x_i32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i32m1( @@ -265,7 +265,7 @@ vint32mf2_t test_vmv_v_x_i32mf2(int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) { - return vmv_v_v_i32m1(src, vl); + return __riscv_vmv_v_v_i32m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i32m1( @@ -274,7 +274,7 @@ vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmv_v_x_i32m1(int32_t src, size_t vl) { - return vmv_v_x_i32m1(src, vl); + return __riscv_vmv_v_x_i32m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i32m2( @@ -283,7 +283,7 @@ vint32m1_t test_vmv_v_x_i32m1(int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) { - return vmv_v_v_i32m2(src, vl); + return __riscv_vmv_v_v_i32m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i32m2( @@ -292,7 +292,7 @@ vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmv_v_x_i32m2(int32_t src, size_t vl) { - return vmv_v_x_i32m2(src, vl); + return __riscv_vmv_v_x_i32m2(src, vl); } // 
CHECK-RV64-LABEL: @test_vmv_v_v_i32m4( @@ -301,7 +301,7 @@ vint32m2_t test_vmv_v_x_i32m2(int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) { - return vmv_v_v_i32m4(src, vl); + return __riscv_vmv_v_v_i32m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i32m4( @@ -310,7 +310,7 @@ vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmv_v_x_i32m4(int32_t src, size_t vl) { - return vmv_v_x_i32m4(src, vl); + return __riscv_vmv_v_x_i32m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i32m8( @@ -319,7 +319,7 @@ vint32m4_t test_vmv_v_x_i32m4(int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) { - return vmv_v_v_i32m8(src, vl); + return __riscv_vmv_v_v_i32m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i32m8( @@ -328,7 +328,7 @@ vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmv_v_x_i32m8(int32_t src, size_t vl) { - return vmv_v_x_i32m8(src, vl); + return __riscv_vmv_v_x_i32m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i64m1( @@ -337,7 +337,7 @@ vint32m8_t test_vmv_v_x_i32m8(int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) { - return vmv_v_v_i64m1(src, vl); + return __riscv_vmv_v_v_i64m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i64m1( @@ -346,7 +346,7 @@ vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmv_v_x_i64m1(int64_t src, size_t vl) { - return vmv_v_x_i64m1(src, vl); + return __riscv_vmv_v_x_i64m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i64m2( @@ -355,7 +355,7 @@ vint64m1_t test_vmv_v_x_i64m1(int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) { - return vmv_v_v_i64m2(src, vl); + return 
__riscv_vmv_v_v_i64m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i64m2( @@ -364,7 +364,7 @@ vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmv_v_x_i64m2(int64_t src, size_t vl) { - return vmv_v_x_i64m2(src, vl); + return __riscv_vmv_v_x_i64m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i64m4( @@ -373,7 +373,7 @@ vint64m2_t test_vmv_v_x_i64m2(int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) { - return vmv_v_v_i64m4(src, vl); + return __riscv_vmv_v_v_i64m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i64m4( @@ -382,7 +382,7 @@ vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmv_v_x_i64m4(int64_t src, size_t vl) { - return vmv_v_x_i64m4(src, vl); + return __riscv_vmv_v_x_i64m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i64m8( @@ -391,7 +391,7 @@ vint64m4_t test_vmv_v_x_i64m4(int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) { - return vmv_v_v_i64m8(src, vl); + return __riscv_vmv_v_v_i64m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i64m8( @@ -400,7 +400,7 @@ vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmv_v_x_i64m8(int64_t src, size_t vl) { - return vmv_v_x_i64m8(src, vl); + return __riscv_vmv_v_x_i64m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8( @@ -409,7 +409,7 @@ vint64m8_t test_vmv_v_x_i64m8(int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) { - return vmv_v_v_u8mf8(src, vl); + return __riscv_vmv_v_v_u8mf8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf8( @@ -418,7 +418,7 @@ vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmv_v_x_u8mf8(uint8_t src, size_t vl) { - return 
vmv_v_x_u8mf8(src, vl); + return __riscv_vmv_v_x_u8mf8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4( @@ -427,7 +427,7 @@ vuint8mf8_t test_vmv_v_x_u8mf8(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) { - return vmv_v_v_u8mf4(src, vl); + return __riscv_vmv_v_v_u8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf4( @@ -436,7 +436,7 @@ vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmv_v_x_u8mf4(uint8_t src, size_t vl) { - return vmv_v_x_u8mf4(src, vl); + return __riscv_vmv_v_x_u8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2( @@ -445,7 +445,7 @@ vuint8mf4_t test_vmv_v_x_u8mf4(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) { - return vmv_v_v_u8mf2(src, vl); + return __riscv_vmv_v_v_u8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf2( @@ -454,7 +454,7 @@ vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmv_v_x_u8mf2(uint8_t src, size_t vl) { - return vmv_v_x_u8mf2(src, vl); + return __riscv_vmv_v_x_u8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8m1( @@ -463,7 +463,7 @@ vuint8mf2_t test_vmv_v_x_u8mf2(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) { - return vmv_v_v_u8m1(src, vl); + return __riscv_vmv_v_v_u8m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8m1( @@ -472,7 +472,7 @@ vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmv_v_x_u8m1(uint8_t src, size_t vl) { - return vmv_v_x_u8m1(src, vl); + return __riscv_vmv_v_x_u8m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8m2( @@ -481,7 +481,7 @@ vuint8m1_t test_vmv_v_x_u8m1(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t 
src, size_t vl) { - return vmv_v_v_u8m2(src, vl); + return __riscv_vmv_v_v_u8m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8m2( @@ -490,7 +490,7 @@ vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmv_v_x_u8m2(uint8_t src, size_t vl) { - return vmv_v_x_u8m2(src, vl); + return __riscv_vmv_v_x_u8m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8m4( @@ -499,7 +499,7 @@ vuint8m2_t test_vmv_v_x_u8m2(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) { - return vmv_v_v_u8m4(src, vl); + return __riscv_vmv_v_v_u8m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8m4( @@ -508,7 +508,7 @@ vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmv_v_x_u8m4(uint8_t src, size_t vl) { - return vmv_v_x_u8m4(src, vl); + return __riscv_vmv_v_x_u8m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8m8( @@ -517,7 +517,7 @@ vuint8m4_t test_vmv_v_x_u8m4(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) { - return vmv_v_v_u8m8(src, vl); + return __riscv_vmv_v_v_u8m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8m8( @@ -526,7 +526,7 @@ vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmv_v_x_u8m8(uint8_t src, size_t vl) { - return vmv_v_x_u8m8(src, vl); + return __riscv_vmv_v_x_u8m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4( @@ -535,7 +535,7 @@ vuint8m8_t test_vmv_v_x_u8m8(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) { - return vmv_v_v_u16mf4(src, vl); + return __riscv_vmv_v_v_u16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u16mf4( @@ -544,7 +544,7 @@ vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vmv_v_x_u16mf4(uint16_t src, size_t vl) { - return vmv_v_x_u16mf4(src, vl); + return __riscv_vmv_v_x_u16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2( @@ -553,7 +553,7 @@ vuint16mf4_t test_vmv_v_x_u16mf4(uint16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) { - return vmv_v_v_u16mf2(src, vl); + return __riscv_vmv_v_v_u16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u16mf2( @@ -562,7 +562,7 @@ vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmv_v_x_u16mf2(uint16_t src, size_t vl) { - return vmv_v_x_u16mf2(src, vl); + return __riscv_vmv_v_x_u16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u16m1( @@ -571,7 +571,7 @@ vuint16mf2_t test_vmv_v_x_u16mf2(uint16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) { - return vmv_v_v_u16m1(src, vl); + return __riscv_vmv_v_v_u16m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u16m1( @@ -580,7 +580,7 @@ vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmv_v_x_u16m1(uint16_t src, size_t vl) { - return vmv_v_x_u16m1(src, vl); + return __riscv_vmv_v_x_u16m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u16m2( @@ -589,7 +589,7 @@ vuint16m1_t test_vmv_v_x_u16m1(uint16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) { - return vmv_v_v_u16m2(src, vl); + return __riscv_vmv_v_v_u16m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u16m2( @@ -598,7 +598,7 @@ vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmv_v_x_u16m2(uint16_t src, size_t vl) { - return vmv_v_x_u16m2(src, vl); + return __riscv_vmv_v_x_u16m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u16m4( @@ -607,7 +607,7 @@ vuint16m2_t 
test_vmv_v_x_u16m2(uint16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) { - return vmv_v_v_u16m4(src, vl); + return __riscv_vmv_v_v_u16m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u16m4( @@ -616,7 +616,7 @@ vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmv_v_x_u16m4(uint16_t src, size_t vl) { - return vmv_v_x_u16m4(src, vl); + return __riscv_vmv_v_x_u16m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u16m8( @@ -625,7 +625,7 @@ vuint16m4_t test_vmv_v_x_u16m4(uint16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) { - return vmv_v_v_u16m8(src, vl); + return __riscv_vmv_v_v_u16m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u16m8( @@ -634,7 +634,7 @@ vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmv_v_x_u16m8(uint16_t src, size_t vl) { - return vmv_v_x_u16m8(src, vl); + return __riscv_vmv_v_x_u16m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2( @@ -643,7 +643,7 @@ vuint16m8_t test_vmv_v_x_u16m8(uint16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) { - return vmv_v_v_u32mf2(src, vl); + return __riscv_vmv_v_v_u32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2( @@ -652,7 +652,7 @@ vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmv_v_x_u32mf2(uint32_t src, size_t vl) { - return vmv_v_x_u32mf2(src, vl); + return __riscv_vmv_v_x_u32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u32m1( @@ -661,7 +661,7 @@ vuint32mf2_t test_vmv_v_x_u32mf2(uint32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) { - return vmv_v_v_u32m1(src, vl); + return __riscv_vmv_v_v_u32m1(src, vl); } // 
CHECK-RV64-LABEL: @test_vmv_v_x_u32m1( @@ -670,7 +670,7 @@ vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmv_v_x_u32m1(uint32_t src, size_t vl) { - return vmv_v_x_u32m1(src, vl); + return __riscv_vmv_v_x_u32m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u32m2( @@ -679,7 +679,7 @@ vuint32m1_t test_vmv_v_x_u32m1(uint32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) { - return vmv_v_v_u32m2(src, vl); + return __riscv_vmv_v_v_u32m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u32m2( @@ -688,7 +688,7 @@ vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmv_v_x_u32m2(uint32_t src, size_t vl) { - return vmv_v_x_u32m2(src, vl); + return __riscv_vmv_v_x_u32m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u32m4( @@ -697,7 +697,7 @@ vuint32m2_t test_vmv_v_x_u32m2(uint32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) { - return vmv_v_v_u32m4(src, vl); + return __riscv_vmv_v_v_u32m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u32m4( @@ -706,7 +706,7 @@ vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmv_v_x_u32m4(uint32_t src, size_t vl) { - return vmv_v_x_u32m4(src, vl); + return __riscv_vmv_v_x_u32m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u32m8( @@ -715,7 +715,7 @@ vuint32m4_t test_vmv_v_x_u32m4(uint32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) { - return vmv_v_v_u32m8(src, vl); + return __riscv_vmv_v_v_u32m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u32m8( @@ -724,7 +724,7 @@ vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmv_v_x_u32m8(uint32_t src, size_t vl) { - return vmv_v_x_u32m8(src, 
vl); + return __riscv_vmv_v_x_u32m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u64m1( @@ -733,7 +733,7 @@ vuint32m8_t test_vmv_v_x_u32m8(uint32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) { - return vmv_v_v_u64m1(src, vl); + return __riscv_vmv_v_v_u64m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u64m1( @@ -742,7 +742,7 @@ vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmv_v_x_u64m1(uint64_t src, size_t vl) { - return vmv_v_x_u64m1(src, vl); + return __riscv_vmv_v_x_u64m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u64m2( @@ -751,7 +751,7 @@ vuint64m1_t test_vmv_v_x_u64m1(uint64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) { - return vmv_v_v_u64m2(src, vl); + return __riscv_vmv_v_v_u64m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u64m2( @@ -760,7 +760,7 @@ vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmv_v_x_u64m2(uint64_t src, size_t vl) { - return vmv_v_x_u64m2(src, vl); + return __riscv_vmv_v_x_u64m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u64m4( @@ -769,7 +769,7 @@ vuint64m2_t test_vmv_v_x_u64m2(uint64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) { - return vmv_v_v_u64m4(src, vl); + return __riscv_vmv_v_v_u64m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u64m4( @@ -778,7 +778,7 @@ vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmv_v_x_u64m4(uint64_t src, size_t vl) { - return vmv_v_x_u64m4(src, vl); + return __riscv_vmv_v_x_u64m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u64m8( @@ -787,7 +787,7 @@ vuint64m4_t test_vmv_v_x_u64m4(uint64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) { - return vmv_v_v_u64m8(src, vl); + return __riscv_vmv_v_v_u64m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u64m8( @@ -796,7 +796,7 @@ vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmv_v_x_u64m8(uint64_t src, size_t vl) { - return vmv_v_x_u64m8(src, vl); + return __riscv_vmv_v_x_u64m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4( @@ -805,7 +805,7 @@ vuint64m8_t test_vmv_v_x_u64m8(uint64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vmv_v_v_f16mf4(vfloat16mf4_t src, size_t vl) { - return vmv_v_v_f16mf4(src, vl); + return __riscv_vmv_v_v_f16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2( @@ -814,7 +814,7 @@ vfloat16mf4_t test_vmv_v_v_f16mf4(vfloat16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vmv_v_v_f16mf2(vfloat16mf2_t src, size_t vl) { - return vmv_v_v_f16mf2(src, vl); + return __riscv_vmv_v_v_f16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f16m1( @@ -823,7 +823,7 @@ vfloat16mf2_t test_vmv_v_v_f16mf2(vfloat16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vmv_v_v_f16m1(vfloat16m1_t src, size_t vl) { - return vmv_v_v_f16m1(src, vl); + return __riscv_vmv_v_v_f16m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f16m2( @@ -832,7 +832,7 @@ vfloat16m1_t test_vmv_v_v_f16m1(vfloat16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vmv_v_v_f16m2(vfloat16m2_t src, size_t vl) { - return vmv_v_v_f16m2(src, vl); + return __riscv_vmv_v_v_f16m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f16m4( @@ -841,7 +841,7 @@ vfloat16m2_t test_vmv_v_v_f16m2(vfloat16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vmv_v_v_f16m4(vfloat16m4_t src, size_t vl) { - return vmv_v_v_f16m4(src, vl); + return __riscv_vmv_v_v_f16m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f16m8( @@ -850,7 +850,7 @@ 
vfloat16m4_t test_vmv_v_v_f16m4(vfloat16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vmv_v_v_f16m8(vfloat16m8_t src, size_t vl) { - return vmv_v_v_f16m8(src, vl); + return __riscv_vmv_v_v_f16m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2( @@ -859,7 +859,7 @@ vfloat16m8_t test_vmv_v_v_f16m8(vfloat16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) { - return vmv_v_v_f32mf2(src, vl); + return __riscv_vmv_v_v_f32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f32m1( @@ -868,7 +868,7 @@ vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) { - return vmv_v_v_f32m1(src, vl); + return __riscv_vmv_v_v_f32m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f32m2( @@ -877,7 +877,7 @@ vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) { - return vmv_v_v_f32m2(src, vl); + return __riscv_vmv_v_v_f32m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f32m4( @@ -886,7 +886,7 @@ vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) { - return vmv_v_v_f32m4(src, vl); + return __riscv_vmv_v_v_f32m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f32m8( @@ -895,7 +895,7 @@ vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) { - return vmv_v_v_f32m8(src, vl); + return __riscv_vmv_v_v_f32m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f64m1( @@ -904,7 +904,7 @@ vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) { - return vmv_v_v_f64m1(src, 
vl); + return __riscv_vmv_v_v_f64m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f64m2( @@ -913,7 +913,7 @@ vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) { - return vmv_v_v_f64m2(src, vl); + return __riscv_vmv_v_v_f64m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f64m4( @@ -922,7 +922,7 @@ vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) { - return vmv_v_v_f64m4(src, vl); + return __riscv_vmv_v_v_f64m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f64m8( @@ -931,7 +931,7 @@ vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) { - return vmv_v_v_f64m8(src, vl); + return __riscv_vmv_v_v_f64m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf8_i8( @@ -940,7 +940,7 @@ vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) { - return vmv_x_s_i8mf8_i8(src); + return __riscv_vmv_x_s_i8mf8_i8(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8( @@ -949,7 +949,7 @@ int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmv_s_x_i8mf8(int8_t src, size_t vl) { - return vmv_s_x_i8mf8(src, vl); + return __riscv_vmv_s_x_i8mf8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf4_i8( @@ -958,7 +958,7 @@ vint8mf8_t test_vmv_s_x_i8mf8(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) { - return vmv_x_s_i8mf4_i8(src); + return __riscv_vmv_x_s_i8mf4_i8(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4( @@ -967,7 +967,7 @@ int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmv_s_x_i8mf4(int8_t src, size_t vl) { - 
return vmv_s_x_i8mf4(src, vl); + return __riscv_vmv_s_x_i8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf2_i8( @@ -976,7 +976,7 @@ vint8mf4_t test_vmv_s_x_i8mf4(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) { - return vmv_x_s_i8mf2_i8(src); + return __riscv_vmv_x_s_i8mf2_i8(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2( @@ -985,7 +985,7 @@ int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmv_s_x_i8mf2(int8_t src, size_t vl) { - return vmv_s_x_i8mf2(src, vl); + return __riscv_vmv_s_x_i8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i8m1_i8( @@ -994,7 +994,7 @@ vint8mf2_t test_vmv_s_x_i8mf2(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) { - return vmv_x_s_i8m1_i8(src); + return __riscv_vmv_x_s_i8m1_i8(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i8m1( @@ -1003,7 +1003,7 @@ int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmv_s_x_i8m1(int8_t src, size_t vl) { - return vmv_s_x_i8m1(src, vl); + return __riscv_vmv_s_x_i8m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i8m2_i8( @@ -1012,7 +1012,7 @@ vint8m1_t test_vmv_s_x_i8m1(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) { - return vmv_x_s_i8m2_i8(src); + return __riscv_vmv_x_s_i8m2_i8(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i8m2( @@ -1021,7 +1021,7 @@ int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmv_s_x_i8m2(int8_t src, size_t vl) { - return vmv_s_x_i8m2(src, vl); + return __riscv_vmv_s_x_i8m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i8m4_i8( @@ -1030,7 +1030,7 @@ vint8m2_t test_vmv_s_x_i8m2(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) { - return vmv_x_s_i8m4_i8(src); + return __riscv_vmv_x_s_i8m4_i8(src); } // 
CHECK-RV64-LABEL: @test_vmv_s_x_i8m4( @@ -1039,7 +1039,7 @@ int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmv_s_x_i8m4(int8_t src, size_t vl) { - return vmv_s_x_i8m4(src, vl); + return __riscv_vmv_s_x_i8m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i8m8_i8( @@ -1048,7 +1048,7 @@ vint8m4_t test_vmv_s_x_i8m4(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) { - return vmv_x_s_i8m8_i8(src); + return __riscv_vmv_x_s_i8m8_i8(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i8m8( @@ -1057,7 +1057,7 @@ int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmv_s_x_i8m8(int8_t src, size_t vl) { - return vmv_s_x_i8m8(src, vl); + return __riscv_vmv_s_x_i8m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf4_i16( @@ -1066,7 +1066,7 @@ vint8m8_t test_vmv_s_x_i8m8(int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i16 [[TMP0]] // int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) { - return vmv_x_s_i16mf4_i16(src); + return __riscv_vmv_x_s_i16mf4_i16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4( @@ -1075,7 +1075,7 @@ int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmv_s_x_i16mf4(int16_t src, size_t vl) { - return vmv_s_x_i16mf4(src, vl); + return __riscv_vmv_s_x_i16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf2_i16( @@ -1084,7 +1084,7 @@ vint16mf4_t test_vmv_s_x_i16mf4(int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret i16 [[TMP0]] // int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) { - return vmv_x_s_i16mf2_i16(src); + return __riscv_vmv_x_s_i16mf2_i16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2( @@ -1093,7 +1093,7 @@ int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmv_s_x_i16mf2(int16_t src, size_t vl) { - return vmv_s_x_i16mf2(src, vl); + return __riscv_vmv_s_x_i16mf2(src, vl); } // CHECK-RV64-LABEL: 
@test_vmv_x_s_i16m1_i16( @@ -1102,7 +1102,7 @@ vint16mf2_t test_vmv_s_x_i16mf2(int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret i16 [[TMP0]] // int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) { - return vmv_x_s_i16m1_i16(src); + return __riscv_vmv_x_s_i16m1_i16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16m1( @@ -1111,7 +1111,7 @@ int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmv_s_x_i16m1(int16_t src, size_t vl) { - return vmv_s_x_i16m1(src, vl); + return __riscv_vmv_s_x_i16m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i16m2_i16( @@ -1120,7 +1120,7 @@ vint16m1_t test_vmv_s_x_i16m1(int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret i16 [[TMP0]] // int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) { - return vmv_x_s_i16m2_i16(src); + return __riscv_vmv_x_s_i16m2_i16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16m2( @@ -1129,7 +1129,7 @@ int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmv_s_x_i16m2(int16_t src, size_t vl) { - return vmv_s_x_i16m2(src, vl); + return __riscv_vmv_s_x_i16m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i16m4_i16( @@ -1138,7 +1138,7 @@ vint16m2_t test_vmv_s_x_i16m2(int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret i16 [[TMP0]] // int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) { - return vmv_x_s_i16m4_i16(src); + return __riscv_vmv_x_s_i16m4_i16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16m4( @@ -1147,7 +1147,7 @@ int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmv_s_x_i16m4(int16_t src, size_t vl) { - return vmv_s_x_i16m4(src, vl); + return __riscv_vmv_s_x_i16m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i16m8_i16( @@ -1156,7 +1156,7 @@ vint16m4_t test_vmv_s_x_i16m4(int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret i16 [[TMP0]] // int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) { - return vmv_x_s_i16m8_i16(src); + return __riscv_vmv_x_s_i16m8_i16(src); } // 
CHECK-RV64-LABEL: @test_vmv_s_x_i16m8( @@ -1165,7 +1165,7 @@ int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmv_s_x_i16m8(int16_t src, size_t vl) { - return vmv_s_x_i16m8(src, vl); + return __riscv_vmv_s_x_i16m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i32mf2_i32( @@ -1174,7 +1174,7 @@ vint16m8_t test_vmv_s_x_i16m8(int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret i32 [[TMP0]] // int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) { - return vmv_x_s_i32mf2_i32(src); + return __riscv_vmv_x_s_i32mf2_i32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2( @@ -1183,7 +1183,7 @@ int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmv_s_x_i32mf2(int32_t src, size_t vl) { - return vmv_s_x_i32mf2(src, vl); + return __riscv_vmv_s_x_i32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i32m1_i32( @@ -1192,7 +1192,7 @@ vint32mf2_t test_vmv_s_x_i32mf2(int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret i32 [[TMP0]] // int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) { - return vmv_x_s_i32m1_i32(src); + return __riscv_vmv_x_s_i32m1_i32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32m1( @@ -1201,7 +1201,7 @@ int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmv_s_x_i32m1(int32_t src, size_t vl) { - return vmv_s_x_i32m1(src, vl); + return __riscv_vmv_s_x_i32m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i32m2_i32( @@ -1210,7 +1210,7 @@ vint32m1_t test_vmv_s_x_i32m1(int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret i32 [[TMP0]] // int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) { - return vmv_x_s_i32m2_i32(src); + return __riscv_vmv_x_s_i32m2_i32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32m2( @@ -1219,7 +1219,7 @@ int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmv_s_x_i32m2(int32_t src, size_t vl) { - return vmv_s_x_i32m2(src, vl); + return __riscv_vmv_s_x_i32m2(src, vl); 
} // CHECK-RV64-LABEL: @test_vmv_x_s_i32m4_i32( @@ -1228,7 +1228,7 @@ vint32m2_t test_vmv_s_x_i32m2(int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret i32 [[TMP0]] // int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) { - return vmv_x_s_i32m4_i32(src); + return __riscv_vmv_x_s_i32m4_i32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32m4( @@ -1237,7 +1237,7 @@ int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmv_s_x_i32m4(int32_t src, size_t vl) { - return vmv_s_x_i32m4(src, vl); + return __riscv_vmv_s_x_i32m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i32m8_i32( @@ -1246,7 +1246,7 @@ vint32m4_t test_vmv_s_x_i32m4(int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret i32 [[TMP0]] // int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) { - return vmv_x_s_i32m8_i32(src); + return __riscv_vmv_x_s_i32m8_i32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32m8( @@ -1255,7 +1255,7 @@ int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmv_s_x_i32m8(int32_t src, size_t vl) { - return vmv_s_x_i32m8(src, vl); + return __riscv_vmv_s_x_i32m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i64m1_i64( @@ -1264,7 +1264,7 @@ vint32m8_t test_vmv_s_x_i32m8(int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) { - return vmv_x_s_i64m1_i64(src); + return __riscv_vmv_x_s_i64m1_i64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i64m1( @@ -1273,7 +1273,7 @@ int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmv_s_x_i64m1(int64_t src, size_t vl) { - return vmv_s_x_i64m1(src, vl); + return __riscv_vmv_s_x_i64m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i64m2_i64( @@ -1282,7 +1282,7 @@ vint64m1_t test_vmv_s_x_i64m1(int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) { - return vmv_x_s_i64m2_i64(src); + return __riscv_vmv_x_s_i64m2_i64(src); 
} // CHECK-RV64-LABEL: @test_vmv_s_x_i64m2( @@ -1291,7 +1291,7 @@ int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmv_s_x_i64m2(int64_t src, size_t vl) { - return vmv_s_x_i64m2(src, vl); + return __riscv_vmv_s_x_i64m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i64m4_i64( @@ -1300,7 +1300,7 @@ vint64m2_t test_vmv_s_x_i64m2(int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) { - return vmv_x_s_i64m4_i64(src); + return __riscv_vmv_x_s_i64m4_i64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i64m4( @@ -1309,7 +1309,7 @@ int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmv_s_x_i64m4(int64_t src, size_t vl) { - return vmv_s_x_i64m4(src, vl); + return __riscv_vmv_s_x_i64m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i64m8_i64( @@ -1318,7 +1318,7 @@ vint64m4_t test_vmv_s_x_i64m4(int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) { - return vmv_x_s_i64m8_i64(src); + return __riscv_vmv_x_s_i64m8_i64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i64m8( @@ -1327,7 +1327,7 @@ int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmv_s_x_i64m8(int64_t src, size_t vl) { - return vmv_s_x_i64m8(src, vl); + return __riscv_vmv_s_x_i64m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf8_u8( @@ -1336,7 +1336,7 @@ vint64m8_t test_vmv_s_x_i64m8(int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) { - return vmv_x_s_u8mf8_u8(src); + return __riscv_vmv_x_s_u8mf8_u8(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8( @@ -1345,7 +1345,7 @@ uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmv_s_x_u8mf8(uint8_t src, size_t vl) { - return vmv_s_x_u8mf8(src, vl); + return __riscv_vmv_s_x_u8mf8(src, vl); } // 
CHECK-RV64-LABEL: @test_vmv_x_s_u8mf4_u8( @@ -1354,7 +1354,7 @@ vuint8mf8_t test_vmv_s_x_u8mf8(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) { - return vmv_x_s_u8mf4_u8(src); + return __riscv_vmv_x_s_u8mf4_u8(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4( @@ -1363,7 +1363,7 @@ uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmv_s_x_u8mf4(uint8_t src, size_t vl) { - return vmv_s_x_u8mf4(src, vl); + return __riscv_vmv_s_x_u8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf2_u8( @@ -1372,7 +1372,7 @@ vuint8mf4_t test_vmv_s_x_u8mf4(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) { - return vmv_x_s_u8mf2_u8(src); + return __riscv_vmv_x_s_u8mf2_u8(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2( @@ -1381,7 +1381,7 @@ uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmv_s_x_u8mf2(uint8_t src, size_t vl) { - return vmv_s_x_u8mf2(src, vl); + return __riscv_vmv_s_x_u8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8m1_u8( @@ -1390,7 +1390,7 @@ vuint8mf2_t test_vmv_s_x_u8mf2(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) { - return vmv_x_s_u8m1_u8(src); + return __riscv_vmv_x_s_u8m1_u8(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u8m1( @@ -1399,7 +1399,7 @@ uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmv_s_x_u8m1(uint8_t src, size_t vl) { - return vmv_s_x_u8m1(src, vl); + return __riscv_vmv_s_x_u8m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8m2_u8( @@ -1408,7 +1408,7 @@ vuint8m1_t test_vmv_s_x_u8m1(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) { - return vmv_x_s_u8m2_u8(src); + return __riscv_vmv_x_s_u8m2_u8(src); } // CHECK-RV64-LABEL: 
@test_vmv_s_x_u8m2( @@ -1417,7 +1417,7 @@ uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmv_s_x_u8m2(uint8_t src, size_t vl) { - return vmv_s_x_u8m2(src, vl); + return __riscv_vmv_s_x_u8m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8m4_u8( @@ -1426,7 +1426,7 @@ vuint8m2_t test_vmv_s_x_u8m2(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) { - return vmv_x_s_u8m4_u8(src); + return __riscv_vmv_x_s_u8m4_u8(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u8m4( @@ -1435,7 +1435,7 @@ uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmv_s_x_u8m4(uint8_t src, size_t vl) { - return vmv_s_x_u8m4(src, vl); + return __riscv_vmv_s_x_u8m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8m8_u8( @@ -1444,7 +1444,7 @@ vuint8m4_t test_vmv_s_x_u8m4(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t src) { - return vmv_x_s_u8m8_u8(src); + return __riscv_vmv_x_s_u8m8_u8(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u8m8( @@ -1453,7 +1453,7 @@ uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmv_s_x_u8m8(uint8_t src, size_t vl) { - return vmv_s_x_u8m8(src, vl); + return __riscv_vmv_s_x_u8m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf4_u16( @@ -1462,7 +1462,7 @@ vuint8m8_t test_vmv_s_x_u8m8(uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret i16 [[TMP0]] // uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) { - return vmv_x_s_u16mf4_u16(src); + return __riscv_vmv_x_s_u16mf4_u16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4( @@ -1471,7 +1471,7 @@ uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmv_s_x_u16mf4(uint16_t src, size_t vl) { - return vmv_s_x_u16mf4(src, vl); + return __riscv_vmv_s_x_u16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf2_u16( @@ 
-1480,7 +1480,7 @@ vuint16mf4_t test_vmv_s_x_u16mf4(uint16_t src, size_t vl) { // CHECK-RV64-NEXT: ret i16 [[TMP0]] // uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) { - return vmv_x_s_u16mf2_u16(src); + return __riscv_vmv_x_s_u16mf2_u16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2( @@ -1489,7 +1489,7 @@ uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmv_s_x_u16mf2(uint16_t src, size_t vl) { - return vmv_s_x_u16mf2(src, vl); + return __riscv_vmv_s_x_u16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16m1_u16( @@ -1498,7 +1498,7 @@ vuint16mf2_t test_vmv_s_x_u16mf2(uint16_t src, size_t vl) { // CHECK-RV64-NEXT: ret i16 [[TMP0]] // uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) { - return vmv_x_s_u16m1_u16(src); + return __riscv_vmv_x_s_u16m1_u16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16m1( @@ -1507,7 +1507,7 @@ uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmv_s_x_u16m1(uint16_t src, size_t vl) { - return vmv_s_x_u16m1(src, vl); + return __riscv_vmv_s_x_u16m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16m2_u16( @@ -1516,7 +1516,7 @@ vuint16m1_t test_vmv_s_x_u16m1(uint16_t src, size_t vl) { // CHECK-RV64-NEXT: ret i16 [[TMP0]] // uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) { - return vmv_x_s_u16m2_u16(src); + return __riscv_vmv_x_s_u16m2_u16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16m2( @@ -1525,7 +1525,7 @@ uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmv_s_x_u16m2(uint16_t src, size_t vl) { - return vmv_s_x_u16m2(src, vl); + return __riscv_vmv_s_x_u16m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16m4_u16( @@ -1534,7 +1534,7 @@ vuint16m2_t test_vmv_s_x_u16m2(uint16_t src, size_t vl) { // CHECK-RV64-NEXT: ret i16 [[TMP0]] // uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) { - return vmv_x_s_u16m4_u16(src); + return __riscv_vmv_x_s_u16m4_u16(src); } // 
CHECK-RV64-LABEL: @test_vmv_s_x_u16m4( @@ -1543,7 +1543,7 @@ uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmv_s_x_u16m4(uint16_t src, size_t vl) { - return vmv_s_x_u16m4(src, vl); + return __riscv_vmv_s_x_u16m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16m8_u16( @@ -1552,7 +1552,7 @@ vuint16m4_t test_vmv_s_x_u16m4(uint16_t src, size_t vl) { // CHECK-RV64-NEXT: ret i16 [[TMP0]] // uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) { - return vmv_x_s_u16m8_u16(src); + return __riscv_vmv_x_s_u16m8_u16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16m8( @@ -1561,7 +1561,7 @@ uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmv_s_x_u16m8(uint16_t src, size_t vl) { - return vmv_s_x_u16m8(src, vl); + return __riscv_vmv_s_x_u16m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32mf2_u32( @@ -1570,7 +1570,7 @@ vuint16m8_t test_vmv_s_x_u16m8(uint16_t src, size_t vl) { // CHECK-RV64-NEXT: ret i32 [[TMP0]] // uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) { - return vmv_x_s_u32mf2_u32(src); + return __riscv_vmv_x_s_u32mf2_u32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2( @@ -1579,7 +1579,7 @@ uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmv_s_x_u32mf2(uint32_t src, size_t vl) { - return vmv_s_x_u32mf2(src, vl); + return __riscv_vmv_s_x_u32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32m1_u32( @@ -1588,7 +1588,7 @@ vuint32mf2_t test_vmv_s_x_u32mf2(uint32_t src, size_t vl) { // CHECK-RV64-NEXT: ret i32 [[TMP0]] // uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) { - return vmv_x_s_u32m1_u32(src); + return __riscv_vmv_x_s_u32m1_u32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32m1( @@ -1597,7 +1597,7 @@ uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmv_s_x_u32m1(uint32_t src, size_t vl) { - return vmv_s_x_u32m1(src, vl); + return 
__riscv_vmv_s_x_u32m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32m2_u32( @@ -1606,7 +1606,7 @@ vuint32m1_t test_vmv_s_x_u32m1(uint32_t src, size_t vl) { // CHECK-RV64-NEXT: ret i32 [[TMP0]] // uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) { - return vmv_x_s_u32m2_u32(src); + return __riscv_vmv_x_s_u32m2_u32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32m2( @@ -1615,7 +1615,7 @@ uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmv_s_x_u32m2(uint32_t src, size_t vl) { - return vmv_s_x_u32m2(src, vl); + return __riscv_vmv_s_x_u32m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32m4_u32( @@ -1624,7 +1624,7 @@ vuint32m2_t test_vmv_s_x_u32m2(uint32_t src, size_t vl) { // CHECK-RV64-NEXT: ret i32 [[TMP0]] // uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) { - return vmv_x_s_u32m4_u32(src); + return __riscv_vmv_x_s_u32m4_u32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32m4( @@ -1633,7 +1633,7 @@ uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmv_s_x_u32m4(uint32_t src, size_t vl) { - return vmv_s_x_u32m4(src, vl); + return __riscv_vmv_s_x_u32m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32m8_u32( @@ -1642,7 +1642,7 @@ vuint32m4_t test_vmv_s_x_u32m4(uint32_t src, size_t vl) { // CHECK-RV64-NEXT: ret i32 [[TMP0]] // uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) { - return vmv_x_s_u32m8_u32(src); + return __riscv_vmv_x_s_u32m8_u32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32m8( @@ -1651,7 +1651,7 @@ uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmv_s_x_u32m8(uint32_t src, size_t vl) { - return vmv_s_x_u32m8(src, vl); + return __riscv_vmv_s_x_u32m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u64m1_u64( @@ -1660,7 +1660,7 @@ vuint32m8_t test_vmv_s_x_u32m8(uint32_t src, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t src) { - return 
vmv_x_s_u64m1_u64(src); + return __riscv_vmv_x_s_u64m1_u64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u64m1( @@ -1669,7 +1669,7 @@ uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmv_s_x_u64m1(uint64_t src, size_t vl) { - return vmv_s_x_u64m1(src, vl); + return __riscv_vmv_s_x_u64m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u64m2_u64( @@ -1678,7 +1678,7 @@ vuint64m1_t test_vmv_s_x_u64m1(uint64_t src, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) { - return vmv_x_s_u64m2_u64(src); + return __riscv_vmv_x_s_u64m2_u64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u64m2( @@ -1687,7 +1687,7 @@ uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmv_s_x_u64m2(uint64_t src, size_t vl) { - return vmv_s_x_u64m2(src, vl); + return __riscv_vmv_s_x_u64m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u64m4_u64( @@ -1696,7 +1696,7 @@ vuint64m2_t test_vmv_s_x_u64m2(uint64_t src, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) { - return vmv_x_s_u64m4_u64(src); + return __riscv_vmv_x_s_u64m4_u64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u64m4( @@ -1705,7 +1705,7 @@ uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmv_s_x_u64m4(uint64_t src, size_t vl) { - return vmv_s_x_u64m4(src, vl); + return __riscv_vmv_s_x_u64m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u64m8_u64( @@ -1714,7 +1714,7 @@ vuint64m4_t test_vmv_s_x_u64m4(uint64_t src, size_t vl) { // CHECK-RV64-NEXT: ret i64 [[TMP0]] // uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) { - return vmv_x_s_u64m8_u64(src); + return __riscv_vmv_x_s_u64m8_u64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u64m8( @@ -1723,6 +1723,6 @@ uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmv_s_x_u64m8(uint64_t src, 
size_t vl) { - return vmv_s_x_u64m8(src, vl); + return __riscv_vmv_s_x_u64m8(src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmxnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmxnor.c index 786433bc3c60..2bd3ee15b01f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmxnor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmxnor.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmxnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return vmxnor_mm_b1(op1, op2, vl); + return __riscv_vmxnor_mm_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmxnor_mm_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmxnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmxnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return vmxnor_mm_b2(op1, op2, vl); + return __riscv_vmxnor_mm_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmxnor_mm_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmxnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmxnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return vmxnor_mm_b4(op1, op2, vl); + return __riscv_vmxnor_mm_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmxnor_mm_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmxnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmxnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return vmxnor_mm_b8(op1, op2, vl); + return __riscv_vmxnor_mm_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmxnor_mm_b16( @@ -48,7 +48,7 @@ vbool8_t test_vmxnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmxnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return vmxnor_mm_b16(op1, op2, vl); + return __riscv_vmxnor_mm_b16(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmxnor_mm_b32( @@ -57,7 +57,7 @@ vbool16_t test_vmxnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmxnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return vmxnor_mm_b32(op1, op2, vl); + return __riscv_vmxnor_mm_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmxnor_mm_b64( @@ -66,6 +66,6 @@ vbool32_t test_vmxnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmxnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return vmxnor_mm_b64(op1, op2, vl); + return __riscv_vmxnor_mm_b64(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmxor.c index fd731402a61b..3073dd114338 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmxor.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmxor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return vmxor_mm_b1(op1, op2, vl); + return __riscv_vmxor_mm_b1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmxor_mm_b2( @@ -21,7 +21,7 @@ vbool1_t test_vmxor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmxor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return vmxor_mm_b2(op1, op2, vl); + return __riscv_vmxor_mm_b2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmxor_mm_b4( @@ -30,7 +30,7 @@ vbool2_t test_vmxor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmxor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return vmxor_mm_b4(op1, op2, vl); + return __riscv_vmxor_mm_b4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmxor_mm_b8( @@ -39,7 +39,7 @@ vbool4_t test_vmxor_mm_b4(vbool4_t op1, vbool4_t 
op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmxor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return vmxor_mm_b8(op1, op2, vl); + return __riscv_vmxor_mm_b8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmxor_mm_b16( @@ -48,7 +48,7 @@ vbool8_t test_vmxor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmxor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return vmxor_mm_b16(op1, op2, vl); + return __riscv_vmxor_mm_b16(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmxor_mm_b32( @@ -57,7 +57,7 @@ vbool16_t test_vmxor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmxor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return vmxor_mm_b32(op1, op2, vl); + return __riscv_vmxor_mm_b32(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmxor_mm_b64( @@ -66,6 +66,6 @@ vbool32_t test_vmxor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmxor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return vmxor_mm_b64(op1, op2, vl); + return __riscv_vmxor_mm_b64(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclip.c index 9fe8c7ba2da9..a3ad33821b5e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclip.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclip_wv_i8mf8(op1, shift, vl); + return __riscv_vnclip_wv_i8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf8(op1, shift, vl); + return __riscv_vnclip_wx_i8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclip_wv_i8mf4(op1, shift, vl); + return __riscv_vnclip_wv_i8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf4(op1, shift, vl); + return __riscv_vnclip_wx_i8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclip_wv_i8mf2(op1, shift, vl); + return __riscv_vnclip_wv_i8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf2(op1, shift, vl); + return __riscv_vnclip_wx_i8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclip_wv_i8m1(op1, shift, vl); + return __riscv_vnclip_wv_i8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t 
test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m1(op1, shift, vl); + return __riscv_vnclip_wx_i8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclip_wv_i8m2(op1, shift, vl); + return __riscv_vnclip_wv_i8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m2(op1, shift, vl); + return __riscv_vnclip_wx_i8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclip_wv_i8m4(op1, shift, vl); + return __riscv_vnclip_wv_i8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m4(op1, shift, vl); + return __riscv_vnclip_wx_i8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4( @@ -120,7 +120,7 @@ vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclip_wv_i16mf4(op1, shift, vl); + return __riscv_vnclip_wv_i16mf4(op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4( @@ -129,7 +129,7 @@ vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf4(op1, shift, vl); + return __riscv_vnclip_wx_i16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2( @@ -138,7 +138,7 @@ vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclip_wv_i16mf2(op1, shift, vl); + return __riscv_vnclip_wv_i16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2( @@ -147,7 +147,7 @@ vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf2(op1, shift, vl); + return __riscv_vnclip_wx_i16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1( @@ -156,7 +156,7 @@ vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclip_wv_i16m1(op1, shift, vl); + return __riscv_vnclip_wv_i16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1( @@ -165,7 +165,7 @@ vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m1(op1, shift, vl); + return __riscv_vnclip_wx_i16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2( @@ -174,7 +174,7 @@ vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, 
vuint16m2_t shift, size_t vl) { - return vnclip_wv_i16m2(op1, shift, vl); + return __riscv_vnclip_wv_i16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2( @@ -183,7 +183,7 @@ vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m2(op1, shift, vl); + return __riscv_vnclip_wx_i16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4( @@ -192,7 +192,7 @@ vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclip_wv_i16m4(op1, shift, vl); + return __riscv_vnclip_wv_i16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4( @@ -201,7 +201,7 @@ vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m4(op1, shift, vl); + return __riscv_vnclip_wx_i16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2( @@ -210,7 +210,7 @@ vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclip_wv_i32mf2(op1, shift, vl); + return __riscv_vnclip_wv_i32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2( @@ -219,7 +219,7 @@ vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32mf2(op1, shift, vl); + return __riscv_vnclip_wx_i32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1( @@ -228,7 +228,7 @@ vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, 
size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclip_wv_i32m1(op1, shift, vl); + return __riscv_vnclip_wv_i32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1( @@ -237,7 +237,7 @@ vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m1(op1, shift, vl); + return __riscv_vnclip_wx_i32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2( @@ -246,7 +246,7 @@ vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclip_wv_i32m2(op1, shift, vl); + return __riscv_vnclip_wv_i32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2( @@ -255,7 +255,7 @@ vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m2(op1, shift, vl); + return __riscv_vnclip_wx_i32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4( @@ -264,7 +264,7 @@ vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclip_wv_i32m4(op1, shift, vl); + return __riscv_vnclip_wv_i32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4( @@ -273,7 +273,7 @@ vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m4(op1, shift, vl); + return __riscv_vnclip_wx_i32m4(op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m( @@ -282,7 +282,7 @@ vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclip_wv_i8mf8_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m( @@ -291,7 +291,7 @@ vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf8_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m( @@ -300,7 +300,7 @@ vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclip_wv_i8mf4_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m( @@ -309,7 +309,7 @@ vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf4_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m( @@ -318,7 +318,7 @@ vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclip_wv_i8mf2_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m( @@ -327,7 
+327,7 @@ vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf2_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m( @@ -336,7 +336,7 @@ vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclip_wv_i8m1_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m( @@ -345,7 +345,7 @@ vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m1_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m( @@ -354,7 +354,7 @@ vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclip_wv_i8m2_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m( @@ -363,7 +363,7 @@ vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m2_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m( @@ -372,7 +372,7 @@ vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, 
siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclip_wv_i8m4_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m( @@ -381,7 +381,7 @@ vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m4_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m( @@ -390,7 +390,7 @@ vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclip_wv_i16mf4_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m( @@ -399,7 +399,7 @@ vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf4_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m( @@ -408,7 +408,7 @@ vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclip_wv_i16mf2_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m( @@ -417,7 +417,7 @@ vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf2_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m( @@ -426,7 +426,7 @@ vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclip_wv_i16m1_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m( @@ -435,7 +435,7 @@ vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m1_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m( @@ -444,7 +444,7 @@ vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclip_wv_i16m2_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m( @@ -453,7 +453,7 @@ vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m2_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m( @@ -462,7 +462,7 @@ vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, 
vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclip_wv_i16m4_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m( @@ -471,7 +471,7 @@ vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m4_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m( @@ -480,7 +480,7 @@ vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclip_wv_i32mf2_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m( @@ -489,7 +489,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32mf2_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m( @@ -498,7 +498,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclip_wv_i32m1_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m( @@ -507,7 +507,7 @@ vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - 
return vnclip_wx_i32m1_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m( @@ -516,7 +516,7 @@ vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclip_wv_i32m2_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m( @@ -525,7 +525,7 @@ vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m2_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m( @@ -534,7 +534,7 @@ vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclip_wv_i32m4_m(mask, op1, shift, vl); + return __riscv_vnclip_wv_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m( @@ -543,6 +543,6 @@ vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m4_m(mask, op1, shift, vl); + return __riscv_vnclip_wx_i32m4_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclipu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclipu.c index a7d8522b84fd..937cb0047493 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclipu.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnclipu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclipu_wv_u8mf8(op1, shift, vl); + return __riscv_vnclipu_wv_u8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf8(op1, shift, vl); + return __riscv_vnclipu_wx_u8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclipu_wv_u8mf4(op1, shift, vl); + return __riscv_vnclipu_wv_u8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4( @@ -39,7 +39,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf4(op1, shift, vl); + return __riscv_vnclipu_wx_u8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2( @@ -48,7 +48,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclipu_wv_u8mf2(op1, shift, vl); + return __riscv_vnclipu_wv_u8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, 
size_t vl) { - return vnclipu_wx_u8mf2(op1, shift, vl); + return __riscv_vnclipu_wx_u8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclipu_wv_u8m1(op1, shift, vl); + return __riscv_vnclipu_wv_u8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m1(op1, shift, vl); + return __riscv_vnclipu_wx_u8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2( @@ -84,7 +84,7 @@ vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclipu_wv_u8m2(op1, shift, vl); + return __riscv_vnclipu_wv_u8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m2(op1, shift, vl); + return __riscv_vnclipu_wx_u8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclipu_wv_u8m4(op1, shift, vl); + return __riscv_vnclipu_wv_u8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4( @@ -111,7 +111,7 @@ vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m4(op1, shift, vl); + return __riscv_vnclipu_wx_u8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4( @@ -120,7 +120,7 @@ vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclipu_wv_u16mf4(op1, shift, vl); + return __riscv_vnclipu_wv_u16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4( @@ -129,7 +129,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf4(op1, shift, vl); + return __riscv_vnclipu_wx_u16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2( @@ -138,7 +138,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclipu_wv_u16mf2(op1, shift, vl); + return __riscv_vnclipu_wv_u16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2( @@ -147,7 +147,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf2(op1, shift, vl); + return __riscv_vnclipu_wx_u16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1( @@ -156,7 +156,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclipu_wv_u16m1(op1, shift, vl); + return 
__riscv_vnclipu_wv_u16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1( @@ -165,7 +165,7 @@ vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m1(op1, shift, vl); + return __riscv_vnclipu_wx_u16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2( @@ -174,7 +174,7 @@ vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclipu_wv_u16m2(op1, shift, vl); + return __riscv_vnclipu_wv_u16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2( @@ -183,7 +183,7 @@ vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m2(op1, shift, vl); + return __riscv_vnclipu_wx_u16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4( @@ -192,7 +192,7 @@ vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclipu_wv_u16m4(op1, shift, vl); + return __riscv_vnclipu_wv_u16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4( @@ -201,7 +201,7 @@ vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m4(op1, shift, vl); + return __riscv_vnclipu_wx_u16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2( @@ -210,7 +210,7 @@ vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclipu_wv_u32mf2(op1, shift, vl); + return __riscv_vnclipu_wv_u32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2( @@ -219,7 +219,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32mf2(op1, shift, vl); + return __riscv_vnclipu_wx_u32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1( @@ -228,7 +228,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclipu_wv_u32m1(op1, shift, vl); + return __riscv_vnclipu_wv_u32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1( @@ -237,7 +237,7 @@ vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m1(op1, shift, vl); + return __riscv_vnclipu_wx_u32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2( @@ -246,7 +246,7 @@ vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclipu_wv_u32m2(op1, shift, vl); + return __riscv_vnclipu_wv_u32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2( @@ -255,7 +255,7 @@ vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m2(op1, shift, vl); + return __riscv_vnclipu_wx_u32m2(op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4( @@ -264,7 +264,7 @@ vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclipu_wv_u32m4(op1, shift, vl); + return __riscv_vnclipu_wv_u32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4( @@ -273,7 +273,7 @@ vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m4(op1, shift, vl); + return __riscv_vnclipu_wx_u32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m( @@ -282,7 +282,7 @@ vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclipu_wv_u8mf8_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m( @@ -291,7 +291,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf8_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m( @@ -300,7 +300,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclipu_wv_u8mf4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m( @@ -309,7 +309,7 @@ vuint8mf4_t 
test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m( @@ -318,7 +318,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclipu_wv_u8mf2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m( @@ -327,7 +327,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m( @@ -336,7 +336,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclipu_wv_u8m1_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m( @@ -345,7 +345,7 @@ vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m1_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m( @@ -354,7 +354,7 @@ vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, 
vuint16m2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclipu_wv_u8m2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m( @@ -363,7 +363,7 @@ vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m( @@ -372,7 +372,7 @@ vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclipu_wv_u8m4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m( @@ -381,7 +381,7 @@ vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m( @@ -390,7 +390,7 @@ vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclipu_wv_u16mf4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m( @@ -399,7 +399,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16m // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m( @@ -408,7 +408,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclipu_wv_u16mf2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m( @@ -417,7 +417,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m( @@ -426,7 +426,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclipu_wv_u16m1_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m( @@ -435,7 +435,7 @@ vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m1_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m( @@ -444,7 +444,7 @@ vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shif // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclipu_wv_u16m2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m( @@ -453,7 +453,7 @@ vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m( @@ -462,7 +462,7 @@ vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclipu_wv_u16m4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m( @@ -471,7 +471,7 @@ vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m( @@ -480,7 +480,7 @@ vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclipu_wv_u32mf2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m( @@ -489,7 +489,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32mf2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m( @@ -498,7 +498,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclipu_wv_u32m1_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m( @@ -507,7 +507,7 @@ vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m1_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m( @@ -516,7 +516,7 @@ vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclipu_wv_u32m2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m( @@ -525,7 +525,7 @@ vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m2_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m( @@ -534,7 +534,7 @@ vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclipu_wv_u32m4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wv_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m( @@ -543,6 +543,6 @@ vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m4_m(mask, op1, shift, vl); + return __riscv_vnclipu_wx_u32m4_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vncvt.c index e29c4c6d1d4b..825369b165a4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vncvt.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vncvt_x_x_w_i8mf8(vint16mf4_t src, size_t vl) { - return vncvt_x_x_w_i8mf8(src, vl); + return __riscv_vncvt_x_x_w_i8mf8(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4( @@ -21,7 +21,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8(vint16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vncvt_x_x_w_i8mf4(vint16mf2_t src, size_t vl) { - return vncvt_x_x_w_i8mf4(src, vl); + return __riscv_vncvt_x_x_w_i8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2( @@ -30,7 +30,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4(vint16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vncvt_x_x_w_i8mf2(vint16m1_t src, size_t vl) { - return vncvt_x_x_w_i8mf2(src, vl); + return __riscv_vncvt_x_x_w_i8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1( @@ -39,7 +39,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2(vint16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m1_t test_vncvt_x_x_w_i8m1(vint16m2_t src, size_t vl) { - return vncvt_x_x_w_i8m1(src, vl); + return __riscv_vncvt_x_x_w_i8m1(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2( @@ -48,7 +48,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1(vint16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vncvt_x_x_w_i8m2(vint16m4_t src, size_t vl) { - return vncvt_x_x_w_i8m2(src, vl); + return __riscv_vncvt_x_x_w_i8m2(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4( @@ -57,7 +57,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2(vint16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vncvt_x_x_w_i8m4(vint16m8_t src, size_t vl) { - return vncvt_x_x_w_i8m4(src, vl); + return __riscv_vncvt_x_x_w_i8m4(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8( @@ -66,7 +66,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4(vint16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vncvt_x_x_w_u8mf8(vuint16mf4_t src, size_t vl) { - return vncvt_x_x_w_u8mf8(src, vl); + return __riscv_vncvt_x_x_w_u8mf8(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4( @@ -75,7 +75,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8(vuint16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vncvt_x_x_w_u8mf4(vuint16mf2_t src, size_t vl) { - return vncvt_x_x_w_u8mf4(src, vl); + return __riscv_vncvt_x_x_w_u8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2( @@ -84,7 +84,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4(vuint16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vncvt_x_x_w_u8mf2(vuint16m1_t src, size_t vl) { - return vncvt_x_x_w_u8mf2(src, vl); + return __riscv_vncvt_x_x_w_u8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1( @@ -93,7 +93,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2(vuint16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vncvt_x_x_w_u8m1(vuint16m2_t src, size_t vl) { - return vncvt_x_x_w_u8m1(src, vl); + return __riscv_vncvt_x_x_w_u8m1(src, vl); } 
// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2( @@ -102,7 +102,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1(vuint16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vncvt_x_x_w_u8m2(vuint16m4_t src, size_t vl) { - return vncvt_x_x_w_u8m2(src, vl); + return __riscv_vncvt_x_x_w_u8m2(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4( @@ -111,7 +111,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2(vuint16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vncvt_x_x_w_u8m4(vuint16m8_t src, size_t vl) { - return vncvt_x_x_w_u8m4(src, vl); + return __riscv_vncvt_x_x_w_u8m4(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4( @@ -120,7 +120,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4(vuint16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vncvt_x_x_w_i16mf4(vint32mf2_t src, size_t vl) { - return vncvt_x_x_w_i16mf4(src, vl); + return __riscv_vncvt_x_x_w_i16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2( @@ -129,7 +129,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4(vint32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vncvt_x_x_w_i16mf2(vint32m1_t src, size_t vl) { - return vncvt_x_x_w_i16mf2(src, vl); + return __riscv_vncvt_x_x_w_i16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1( @@ -138,7 +138,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2(vint32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vncvt_x_x_w_i16m1(vint32m2_t src, size_t vl) { - return vncvt_x_x_w_i16m1(src, vl); + return __riscv_vncvt_x_x_w_i16m1(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2( @@ -147,7 +147,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1(vint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vncvt_x_x_w_i16m2(vint32m4_t src, size_t vl) { - return vncvt_x_x_w_i16m2(src, vl); + return __riscv_vncvt_x_x_w_i16m2(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4( @@ -156,7 +156,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2(vint32m4_t src, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vncvt_x_x_w_i16m4(vint32m8_t src, size_t vl) { - return vncvt_x_x_w_i16m4(src, vl); + return __riscv_vncvt_x_x_w_i16m4(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4( @@ -165,7 +165,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4(vint32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vncvt_x_x_w_u16mf4(vuint32mf2_t src, size_t vl) { - return vncvt_x_x_w_u16mf4(src, vl); + return __riscv_vncvt_x_x_w_u16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2( @@ -174,7 +174,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4(vuint32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vncvt_x_x_w_u16mf2(vuint32m1_t src, size_t vl) { - return vncvt_x_x_w_u16mf2(src, vl); + return __riscv_vncvt_x_x_w_u16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1( @@ -183,7 +183,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2(vuint32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vncvt_x_x_w_u16m1(vuint32m2_t src, size_t vl) { - return vncvt_x_x_w_u16m1(src, vl); + return __riscv_vncvt_x_x_w_u16m1(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2( @@ -192,7 +192,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1(vuint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vncvt_x_x_w_u16m2(vuint32m4_t src, size_t vl) { - return vncvt_x_x_w_u16m2(src, vl); + return __riscv_vncvt_x_x_w_u16m2(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4( @@ -201,7 +201,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2(vuint32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vncvt_x_x_w_u16m4(vuint32m8_t src, size_t vl) { - return vncvt_x_x_w_u16m4(src, vl); + return __riscv_vncvt_x_x_w_u16m4(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2( @@ -210,7 +210,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4(vuint32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vncvt_x_x_w_i32mf2(vint64m1_t src, size_t vl) { - return vncvt_x_x_w_i32mf2(src, vl); + return __riscv_vncvt_x_x_w_i32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1( @@ -219,7 +219,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2(vint64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vncvt_x_x_w_i32m1(vint64m2_t src, size_t vl) { - return vncvt_x_x_w_i32m1(src, vl); + return __riscv_vncvt_x_x_w_i32m1(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2( @@ -228,7 +228,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1(vint64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vncvt_x_x_w_i32m2(vint64m4_t src, size_t vl) { - return vncvt_x_x_w_i32m2(src, vl); + return __riscv_vncvt_x_x_w_i32m2(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4( @@ -237,7 +237,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2(vint64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vncvt_x_x_w_i32m4(vint64m8_t src, size_t vl) { - return vncvt_x_x_w_i32m4(src, vl); + return __riscv_vncvt_x_x_w_i32m4(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2( @@ -246,7 +246,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4(vint64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vncvt_x_x_w_u32mf2(vuint64m1_t src, size_t vl) { - return vncvt_x_x_w_u32mf2(src, vl); + return __riscv_vncvt_x_x_w_u32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1( @@ -255,7 +255,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2(vuint64m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vncvt_x_x_w_u32m1(vuint64m2_t src, size_t vl) { - return vncvt_x_x_w_u32m1(src, vl); + return __riscv_vncvt_x_x_w_u32m1(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2( @@ -264,7 +264,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1(vuint64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vncvt_x_x_w_u32m2(vuint64m4_t src, size_t vl) { - return vncvt_x_x_w_u32m2(src, vl); + return 
__riscv_vncvt_x_x_w_u32m2(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4( @@ -273,7 +273,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2(vuint64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vncvt_x_x_w_u32m4(vuint64m8_t src, size_t vl) { - return vncvt_x_x_w_u32m4(src, vl); + return __riscv_vncvt_x_x_w_u32m4(src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_m( @@ -282,7 +282,7 @@ vuint32m4_t test_vncvt_x_x_w_u32m4(vuint64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vncvt_x_x_w_i8mf8_m(vbool64_t mask, vint16mf4_t src, size_t vl) { - return vncvt_x_x_w_i8mf8_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_m( @@ -291,7 +291,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8_m(vbool64_t mask, vint16mf4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vncvt_x_x_w_i8mf4_m(vbool32_t mask, vint16mf2_t src, size_t vl) { - return vncvt_x_x_w_i8mf4_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_m( @@ -300,7 +300,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4_m(vbool32_t mask, vint16mf2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vncvt_x_x_w_i8mf2_m(vbool16_t mask, vint16m1_t src, size_t vl) { - return vncvt_x_x_w_i8mf2_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_m( @@ -309,7 +309,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2_m(vbool16_t mask, vint16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vncvt_x_x_w_i8m1_m(vbool8_t mask, vint16m2_t src, size_t vl) { - return vncvt_x_x_w_i8m1_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i8m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_m( @@ -318,7 +318,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1_m(vbool8_t mask, vint16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vncvt_x_x_w_i8m2_m(vbool4_t mask, vint16m4_t src, size_t vl) { - return vncvt_x_x_w_i8m2_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_m( @@ -327,7 +327,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2_m(vbool4_t mask, vint16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vncvt_x_x_w_i8m4_m(vbool2_t mask, vint16m8_t src, size_t vl) { - return vncvt_x_x_w_i8m4_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_m( @@ -336,7 +336,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4_m(vbool2_t mask, vint16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vncvt_x_x_w_u8mf8_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { - return vncvt_x_x_w_u8mf8_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u8mf8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_m( @@ -345,7 +345,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8_m(vbool64_t mask, vuint16mf4_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vncvt_x_x_w_u8mf4_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { - return vncvt_x_x_w_u8mf4_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u8mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_m( @@ -354,7 +354,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4_m(vbool32_t mask, vuint16mf2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vncvt_x_x_w_u8mf2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { - return vncvt_x_x_w_u8mf2_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u8mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_m( @@ -363,7 +363,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2_m(vbool16_t mask, vuint16m1_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vncvt_x_x_w_u8m1_m(vbool8_t mask, vuint16m2_t src, size_t vl) { - return vncvt_x_x_w_u8m1_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u8m1_m(mask, src, vl); } // 
CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_m( @@ -372,7 +372,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1_m(vbool8_t mask, vuint16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vncvt_x_x_w_u8m2_m(vbool4_t mask, vuint16m4_t src, size_t vl) { - return vncvt_x_x_w_u8m2_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u8m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_m( @@ -381,7 +381,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2_m(vbool4_t mask, vuint16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vncvt_x_x_w_u8m4_m(vbool2_t mask, vuint16m8_t src, size_t vl) { - return vncvt_x_x_w_u8m4_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u8m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_m( @@ -390,7 +390,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4_m(vbool2_t mask, vuint16m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vncvt_x_x_w_i16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl) { - return vncvt_x_x_w_i16mf4_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_m( @@ -399,7 +399,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vncvt_x_x_w_i16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return vncvt_x_x_w_i16mf2_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_m( @@ -408,7 +408,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vncvt_x_x_w_i16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return vncvt_x_x_w_i16m1_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_m( @@ -417,7 +417,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m2_t test_vncvt_x_x_w_i16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return vncvt_x_x_w_i16m2_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_m( @@ -426,7 +426,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vncvt_x_x_w_i16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) { - return vncvt_x_x_w_i16m4_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_m( @@ -435,7 +435,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vncvt_x_x_w_u16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return vncvt_x_x_w_u16mf4_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_m( @@ -444,7 +444,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vncvt_x_x_w_u16mf2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return vncvt_x_x_w_u16mf2_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_m( @@ -453,7 +453,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2_m(vbool32_t mask, vuint32m1_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vncvt_x_x_w_u16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return vncvt_x_x_w_u16m1_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_m( @@ -462,7 +462,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vncvt_x_x_w_u16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return vncvt_x_x_w_u16m2_m(mask, src, vl); + return 
__riscv_vncvt_x_x_w_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_m( @@ -471,7 +471,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vncvt_x_x_w_u16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) { - return vncvt_x_x_w_u16m4_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_m( @@ -480,7 +480,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vncvt_x_x_w_i32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) { - return vncvt_x_x_w_i32mf2_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_m( @@ -489,7 +489,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vncvt_x_x_w_i32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) { - return vncvt_x_x_w_i32m1_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_m( @@ -498,7 +498,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vncvt_x_x_w_i32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) { - return vncvt_x_x_w_i32m2_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_m( @@ -507,7 +507,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vncvt_x_x_w_i32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) { - return vncvt_x_x_w_i32m4_m(mask, src, vl); + return __riscv_vncvt_x_x_w_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_m( @@ -516,7 +516,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4_m(vbool8_t mask, 
vint64m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vncvt_x_x_w_u32mf2_m(vbool64_t mask, vuint64m1_t src, size_t vl) { - return vncvt_x_x_w_u32mf2_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_m( @@ -525,7 +525,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2_m(vbool64_t mask, vuint64m1_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vncvt_x_x_w_u32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) { - return vncvt_x_x_w_u32m1_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_m( @@ -534,7 +534,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vncvt_x_x_w_u32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) { - return vncvt_x_x_w_u32m2_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_m( @@ -543,6 +543,6 @@ vuint32m2_t test_vncvt_x_x_w_u32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vncvt_x_x_w_u32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) { - return vncvt_x_x_w_u32m4_m(mask, src, vl); + return __riscv_vncvt_x_x_w_u32m4_m(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vneg.c index c8ed4d98668d..d79593ce4a5e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vneg.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vneg_v_i8mf8(vint8mf8_t op1, size_t vl) { - return vneg_v_i8mf8(op1, vl); + return __riscv_vneg_v_i8mf8(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf4( @@ 
-22,7 +22,7 @@ vint8mf8_t test_vneg_v_i8mf8(vint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vneg_v_i8mf4(vint8mf4_t op1, size_t vl) { - return vneg_v_i8mf4(op1, vl); + return __riscv_vneg_v_i8mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf2( @@ -31,7 +31,7 @@ vint8mf4_t test_vneg_v_i8mf4(vint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vneg_v_i8mf2(vint8mf2_t op1, size_t vl) { - return vneg_v_i8mf2(op1, vl); + return __riscv_vneg_v_i8mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m1( @@ -40,7 +40,7 @@ vint8mf2_t test_vneg_v_i8mf2(vint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vneg_v_i8m1(vint8m1_t op1, size_t vl) { - return vneg_v_i8m1(op1, vl); + return __riscv_vneg_v_i8m1(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m2( @@ -49,7 +49,7 @@ vint8m1_t test_vneg_v_i8m1(vint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vneg_v_i8m2(vint8m2_t op1, size_t vl) { - return vneg_v_i8m2(op1, vl); + return __riscv_vneg_v_i8m2(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m4( @@ -58,7 +58,7 @@ vint8m2_t test_vneg_v_i8m2(vint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vneg_v_i8m4(vint8m4_t op1, size_t vl) { - return vneg_v_i8m4(op1, vl); + return __riscv_vneg_v_i8m4(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m8( @@ -67,7 +67,7 @@ vint8m4_t test_vneg_v_i8m4(vint8m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vneg_v_i8m8(vint8m8_t op1, size_t vl) { - return vneg_v_i8m8(op1, vl); + return __riscv_vneg_v_i8m8(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf4( @@ -76,7 +76,7 @@ vint8m8_t test_vneg_v_i8m8(vint8m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vneg_v_i16mf4(vint16mf4_t op1, size_t vl) { - return vneg_v_i16mf4(op1, vl); + return __riscv_vneg_v_i16mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf2( @@ -85,7 +85,7 @@ vint16mf4_t 
test_vneg_v_i16mf4(vint16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vneg_v_i16mf2(vint16mf2_t op1, size_t vl) { - return vneg_v_i16mf2(op1, vl); + return __riscv_vneg_v_i16mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m1( @@ -94,7 +94,7 @@ vint16mf2_t test_vneg_v_i16mf2(vint16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vneg_v_i16m1(vint16m1_t op1, size_t vl) { - return vneg_v_i16m1(op1, vl); + return __riscv_vneg_v_i16m1(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m2( @@ -103,7 +103,7 @@ vint16m1_t test_vneg_v_i16m1(vint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vneg_v_i16m2(vint16m2_t op1, size_t vl) { - return vneg_v_i16m2(op1, vl); + return __riscv_vneg_v_i16m2(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m4( @@ -112,7 +112,7 @@ vint16m2_t test_vneg_v_i16m2(vint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vneg_v_i16m4(vint16m4_t op1, size_t vl) { - return vneg_v_i16m4(op1, vl); + return __riscv_vneg_v_i16m4(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m8( @@ -121,7 +121,7 @@ vint16m4_t test_vneg_v_i16m4(vint16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vneg_v_i16m8(vint16m8_t op1, size_t vl) { - return vneg_v_i16m8(op1, vl); + return __riscv_vneg_v_i16m8(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32mf2( @@ -130,7 +130,7 @@ vint16m8_t test_vneg_v_i16m8(vint16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vneg_v_i32mf2(vint32mf2_t op1, size_t vl) { - return vneg_v_i32mf2(op1, vl); + return __riscv_vneg_v_i32mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m1( @@ -139,7 +139,7 @@ vint32mf2_t test_vneg_v_i32mf2(vint32mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vneg_v_i32m1(vint32m1_t op1, size_t vl) { - return vneg_v_i32m1(op1, vl); + return __riscv_vneg_v_i32m1(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m2( @@ 
-148,7 +148,7 @@ vint32m1_t test_vneg_v_i32m1(vint32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vneg_v_i32m2(vint32m2_t op1, size_t vl) { - return vneg_v_i32m2(op1, vl); + return __riscv_vneg_v_i32m2(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m4( @@ -157,7 +157,7 @@ vint32m2_t test_vneg_v_i32m2(vint32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vneg_v_i32m4(vint32m4_t op1, size_t vl) { - return vneg_v_i32m4(op1, vl); + return __riscv_vneg_v_i32m4(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m8( @@ -166,7 +166,7 @@ vint32m4_t test_vneg_v_i32m4(vint32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vneg_v_i32m8(vint32m8_t op1, size_t vl) { - return vneg_v_i32m8(op1, vl); + return __riscv_vneg_v_i32m8(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m1( @@ -175,7 +175,7 @@ vint32m8_t test_vneg_v_i32m8(vint32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vneg_v_i64m1(vint64m1_t op1, size_t vl) { - return vneg_v_i64m1(op1, vl); + return __riscv_vneg_v_i64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m2( @@ -184,7 +184,7 @@ vint64m1_t test_vneg_v_i64m1(vint64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vneg_v_i64m2(vint64m2_t op1, size_t vl) { - return vneg_v_i64m2(op1, vl); + return __riscv_vneg_v_i64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m4( @@ -193,7 +193,7 @@ vint64m2_t test_vneg_v_i64m2(vint64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vneg_v_i64m4(vint64m4_t op1, size_t vl) { - return vneg_v_i64m4(op1, vl); + return __riscv_vneg_v_i64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m8( @@ -202,7 +202,7 @@ vint64m4_t test_vneg_v_i64m4(vint64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vneg_v_i64m8(vint64m8_t op1, size_t vl) { - return vneg_v_i64m8(op1, vl); + return __riscv_vneg_v_i64m8(op1, vl); } // CHECK-RV64-LABEL: 
@test_vneg_v_i8mf8_m( @@ -211,7 +211,7 @@ vint64m8_t test_vneg_v_i64m8(vint64m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vneg_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return vneg_v_i8mf8_m(mask, op1, vl); + return __riscv_vneg_v_i8mf8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf4_m( @@ -220,7 +220,7 @@ vint8mf8_t test_vneg_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vneg_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return vneg_v_i8mf4_m(mask, op1, vl); + return __riscv_vneg_v_i8mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf2_m( @@ -229,7 +229,7 @@ vint8mf4_t test_vneg_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vneg_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return vneg_v_i8mf2_m(mask, op1, vl); + return __riscv_vneg_v_i8mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m1_m( @@ -238,7 +238,7 @@ vint8mf2_t test_vneg_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vneg_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) { - return vneg_v_i8m1_m(mask, op1, vl); + return __riscv_vneg_v_i8m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m2_m( @@ -247,7 +247,7 @@ vint8m1_t test_vneg_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vneg_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) { - return vneg_v_i8m2_m(mask, op1, vl); + return __riscv_vneg_v_i8m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m4_m( @@ -256,7 +256,7 @@ vint8m2_t test_vneg_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vneg_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) { - return vneg_v_i8m4_m(mask, op1, vl); + return __riscv_vneg_v_i8m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: 
@test_vneg_v_i8m8_m( @@ -265,7 +265,7 @@ vint8m4_t test_vneg_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vneg_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) { - return vneg_v_i8m8_m(mask, op1, vl); + return __riscv_vneg_v_i8m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf4_m( @@ -274,7 +274,7 @@ vint8m8_t test_vneg_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vneg_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { - return vneg_v_i16mf4_m(mask, op1, vl); + return __riscv_vneg_v_i16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf2_m( @@ -283,7 +283,7 @@ vint16mf4_t test_vneg_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vneg_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { - return vneg_v_i16mf2_m(mask, op1, vl); + return __riscv_vneg_v_i16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m1_m( @@ -292,7 +292,7 @@ vint16mf2_t test_vneg_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vneg_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) { - return vneg_v_i16m1_m(mask, op1, vl); + return __riscv_vneg_v_i16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m2_m( @@ -301,7 +301,7 @@ vint16m1_t test_vneg_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vneg_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) { - return vneg_v_i16m2_m(mask, op1, vl); + return __riscv_vneg_v_i16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m4_m( @@ -310,7 +310,7 @@ vint16m2_t test_vneg_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vneg_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) { - return vneg_v_i16m4_m(mask, op1, vl); + return 
__riscv_vneg_v_i16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m8_m( @@ -319,7 +319,7 @@ vint16m4_t test_vneg_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vneg_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) { - return vneg_v_i16m8_m(mask, op1, vl); + return __riscv_vneg_v_i16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32mf2_m( @@ -328,7 +328,7 @@ vint16m8_t test_vneg_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vneg_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { - return vneg_v_i32mf2_m(mask, op1, vl); + return __riscv_vneg_v_i32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m1_m( @@ -337,7 +337,7 @@ vint32mf2_t test_vneg_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vneg_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) { - return vneg_v_i32m1_m(mask, op1, vl); + return __riscv_vneg_v_i32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m2_m( @@ -346,7 +346,7 @@ vint32m1_t test_vneg_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vneg_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) { - return vneg_v_i32m2_m(mask, op1, vl); + return __riscv_vneg_v_i32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m4_m( @@ -355,7 +355,7 @@ vint32m2_t test_vneg_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vneg_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) { - return vneg_v_i32m4_m(mask, op1, vl); + return __riscv_vneg_v_i32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m8_m( @@ -364,7 +364,7 @@ vint32m4_t test_vneg_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vneg_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) { 
- return vneg_v_i32m8_m(mask, op1, vl); + return __riscv_vneg_v_i32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m1_m( @@ -373,7 +373,7 @@ vint32m8_t test_vneg_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vneg_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) { - return vneg_v_i64m1_m(mask, op1, vl); + return __riscv_vneg_v_i64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m2_m( @@ -382,7 +382,7 @@ vint64m1_t test_vneg_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vneg_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) { - return vneg_v_i64m2_m(mask, op1, vl); + return __riscv_vneg_v_i64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m4_m( @@ -391,7 +391,7 @@ vint64m2_t test_vneg_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vneg_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) { - return vneg_v_i64m4_m(mask, op1, vl); + return __riscv_vneg_v_i64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m8_m( @@ -400,6 +400,6 @@ vint64m4_t test_vneg_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vneg_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) { - return vneg_v_i64m8_m(mask, op1, vl); + return __riscv_vneg_v_i64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsac.c index 1bb938bbc519..d23f965b759d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsac.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, 
vint8mf8_t vs2, size_t vl) { - return vnmsac_vv_i8mf8(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8( @@ -22,7 +22,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vnmsac_vx_i8mf8(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4( @@ -31,7 +31,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vnmsac_vv_i8mf4(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4( @@ -40,7 +40,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vnmsac_vx_i8mf4(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2( @@ -49,7 +49,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vnmsac_vv_i8mf2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2( @@ -58,7 +58,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vnmsac_vx_i8mf2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vnmsac_vv_i8m1( @@ -67,7 +67,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vnmsac_vv_i8m1(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1( @@ -76,7 +76,7 @@ vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vnmsac_vx_i8m1(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2( @@ -85,7 +85,7 @@ vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vnmsac_vv_i8m2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2( @@ -94,7 +94,7 @@ vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vnmsac_vx_i8m2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4( @@ -103,7 +103,7 @@ vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vnmsac_vv_i8m4(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4( @@ -112,7 +112,7 @@ vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t 
vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vnmsac_vx_i8m4(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8( @@ -121,7 +121,7 @@ vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vnmsac_vv_i8m8(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8( @@ -130,7 +130,7 @@ vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vnmsac_vx_i8m8(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4( @@ -139,7 +139,7 @@ vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vnmsac_vv_i16mf4(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4( @@ -148,7 +148,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vnmsac_vx_i16mf4(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2( @@ -157,7 +157,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vnmsac_vv_i16mf2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16mf2(vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2( @@ -166,7 +166,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vnmsac_vx_i16mf2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1( @@ -175,7 +175,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vnmsac_vv_i16m1(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1( @@ -184,7 +184,7 @@ vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vnmsac_vx_i16m1(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2( @@ -193,7 +193,7 @@ vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vnmsac_vv_i16m2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2( @@ -202,7 +202,7 @@ vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vnmsac_vx_i16m2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4( @@ -211,7 +211,7 @@ vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vnmsac_vv_i16m4(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4( @@ -220,7 +220,7 @@ vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vnmsac_vx_i16m4(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8( @@ -229,7 +229,7 @@ vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vnmsac_vv_i16m8(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8( @@ -238,7 +238,7 @@ vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vnmsac_vx_i16m8(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2( @@ -247,7 +247,7 @@ vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vnmsac_vv_i32mf2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2( @@ -256,7 +256,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return 
vnmsac_vx_i32mf2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1( @@ -265,7 +265,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vnmsac_vv_i32m1(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1( @@ -274,7 +274,7 @@ vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vnmsac_vx_i32m1(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2( @@ -283,7 +283,7 @@ vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vnmsac_vv_i32m2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2( @@ -292,7 +292,7 @@ vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vnmsac_vx_i32m2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4( @@ -301,7 +301,7 @@ vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vnmsac_vv_i32m4(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4( @@ -310,7 +310,7 
@@ vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vnmsac_vx_i32m4(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8( @@ -319,7 +319,7 @@ vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vnmsac_vv_i32m8(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8( @@ -328,7 +328,7 @@ vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vnmsac_vx_i32m8(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1( @@ -337,7 +337,7 @@ vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vnmsac_vv_i64m1(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1( @@ -346,7 +346,7 @@ vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vnmsac_vx_i64m1(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2( @@ -355,7 +355,7 @@ vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vnmsac_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vnmsac_vv_i64m2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2( @@ -364,7 +364,7 @@ vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vnmsac_vx_i64m2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4( @@ -373,7 +373,7 @@ vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vnmsac_vv_i64m4(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4( @@ -382,7 +382,7 @@ vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vnmsac_vx_i64m4(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8( @@ -391,7 +391,7 @@ vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vnmsac_vv_i64m8(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8( @@ -400,7 +400,7 @@ vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vnmsac_vx_i64m8(vd, rs1, vs2, vl); + return 
__riscv_vnmsac_vx_i64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8( @@ -409,7 +409,7 @@ vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vnmsac_vv_u8mf8(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8( @@ -418,7 +418,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vnmsac_vx_u8mf8(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4( @@ -427,7 +427,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vnmsac_vv_u8mf4(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4( @@ -436,7 +436,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vnmsac_vx_u8mf4(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2( @@ -445,7 +445,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vnmsac_vv_u8mf2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2( @@ -454,7 +454,7 @@ vuint8mf2_t 
test_vnmsac_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vnmsac_vx_u8mf2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1( @@ -463,7 +463,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vnmsac_vv_u8m1(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1( @@ -472,7 +472,7 @@ vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vnmsac_vx_u8m1(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2( @@ -481,7 +481,7 @@ vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vnmsac_vv_u8m2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2( @@ -490,7 +490,7 @@ vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vnmsac_vx_u8m2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4( @@ -499,7 +499,7 @@ vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, 
vuint8m4_t vs2, size_t vl) { - return vnmsac_vv_u8m4(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4( @@ -508,7 +508,7 @@ vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vnmsac_vx_u8m4(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8( @@ -517,7 +517,7 @@ vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vnmsac_vv_u8m8(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8( @@ -526,7 +526,7 @@ vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vnmsac_vx_u8m8(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4( @@ -535,7 +535,7 @@ vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vnmsac_vv_u16mf4(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4( @@ -544,7 +544,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vnmsac_vx_u16mf4(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16mf4(vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2( @@ -553,7 +553,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vnmsac_vv_u16mf2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2( @@ -562,7 +562,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vnmsac_vx_u16mf2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1( @@ -571,7 +571,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vnmsac_vv_u16m1(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1( @@ -580,7 +580,7 @@ vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vnmsac_vx_u16m1(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2( @@ -589,7 +589,7 @@ vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vnmsac_vv_u16m2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2( @@ -598,7 +598,7 @@ vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t vd, 
vuint16m2_t vs1, vuint16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vnmsac_vx_u16m2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4( @@ -607,7 +607,7 @@ vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vnmsac_vv_u16m4(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4( @@ -616,7 +616,7 @@ vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vnmsac_vx_u16m4(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8( @@ -625,7 +625,7 @@ vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vnmsac_vv_u16m8(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8( @@ -634,7 +634,7 @@ vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vnmsac_vx_u16m8(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2( @@ -643,7 +643,7 @@ vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t 
vs1, vuint32mf2_t vs2, size_t vl) { - return vnmsac_vv_u32mf2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2( @@ -652,7 +652,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vnmsac_vx_u32mf2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1( @@ -661,7 +661,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vnmsac_vv_u32m1(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1( @@ -670,7 +670,7 @@ vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vnmsac_vx_u32m1(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2( @@ -679,7 +679,7 @@ vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vnmsac_vv_u32m2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2( @@ -688,7 +688,7 @@ vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vnmsac_vx_u32m2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m2(vd, rs1, 
vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4( @@ -697,7 +697,7 @@ vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vnmsac_vv_u32m4(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4( @@ -706,7 +706,7 @@ vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vnmsac_vx_u32m4(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8( @@ -715,7 +715,7 @@ vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vnmsac_vv_u32m8(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8( @@ -724,7 +724,7 @@ vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vnmsac_vx_u32m8(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1( @@ -733,7 +733,7 @@ vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vnmsac_vv_u64m1(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1( @@ -742,7 +742,7 @@ vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t vd, 
vuint64m1_t vs1, vuint64m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vnmsac_vx_u64m1(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2( @@ -751,7 +751,7 @@ vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vnmsac_vv_u64m2(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2( @@ -760,7 +760,7 @@ vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vnmsac_vx_u64m2(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4( @@ -769,7 +769,7 @@ vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vnmsac_vv_u64m4(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4( @@ -778,7 +778,7 @@ vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vnmsac_vx_u64m4(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8( @@ -787,7 +787,7 @@ vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, 
vuint64m8_t vs2, size_t vl) { - return vnmsac_vv_u64m8(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8( @@ -796,7 +796,7 @@ vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vnmsac_vx_u64m8(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m( @@ -805,7 +805,7 @@ vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vnmsac_vv_i8mf8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m( @@ -814,7 +814,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vnmsac_vx_i8mf8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m( @@ -823,7 +823,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vnmsac_vv_i8mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m( @@ -832,7 +832,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t 
vl) { - return vnmsac_vx_i8mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m( @@ -841,7 +841,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vnmsac_vv_i8mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m( @@ -850,7 +850,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vnmsac_vx_i8mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m( @@ -859,7 +859,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vnmsac_vv_i8m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m( @@ -868,7 +868,7 @@ vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vnmsac_vx_i8m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m( @@ -877,7 +877,7 @@ vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, 
size_t vl) { - return vnmsac_vv_i8m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m( @@ -886,7 +886,7 @@ vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vnmsac_vx_i8m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m( @@ -895,7 +895,7 @@ vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vnmsac_vv_i8m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m( @@ -904,7 +904,7 @@ vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vnmsac_vx_i8m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m( @@ -913,7 +913,7 @@ vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vnmsac_vv_i8m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m( @@ -922,7 +922,7 @@ vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - 
return vnmsac_vx_i8m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m( @@ -931,7 +931,7 @@ vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vnmsac_vv_i16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m( @@ -940,7 +940,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vnmsac_vx_i16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m( @@ -949,7 +949,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vnmsac_vv_i16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m( @@ -958,7 +958,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vnmsac_vx_i16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m( @@ -967,7 +967,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, 
vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vnmsac_vv_i16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m( @@ -976,7 +976,7 @@ vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vnmsac_vx_i16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m( @@ -985,7 +985,7 @@ vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vnmsac_vv_i16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m( @@ -994,7 +994,7 @@ vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vnmsac_vx_i16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m( @@ -1003,7 +1003,7 @@ vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vnmsac_vv_i16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m( @@ -1012,7 +1012,7 @@ vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vnmsac_vx_i16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m( @@ -1021,7 +1021,7 @@ vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vnmsac_vv_i16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m( @@ -1030,7 +1030,7 @@ vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vnmsac_vx_i16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m( @@ -1039,7 +1039,7 @@ vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vnmsac_vv_i32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m( @@ -1048,7 +1048,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vnmsac_vx_i32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m( @@ -1057,7 +1057,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vnmsac_vv_i32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m( @@ -1066,7 +1066,7 @@ vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vnmsac_vx_i32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m( @@ -1075,7 +1075,7 @@ vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vnmsac_vv_i32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m( @@ -1084,7 +1084,7 @@ vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vnmsac_vx_i32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m( @@ -1093,7 +1093,7 @@ vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vnmsac_vv_i32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m( @@ -1102,7 +1102,7 @@ vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t 
mask, vint32m4_t vd, vint32m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vnmsac_vx_i32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m( @@ -1111,7 +1111,7 @@ vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vnmsac_vv_i32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m( @@ -1120,7 +1120,7 @@ vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vnmsac_vx_i32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m( @@ -1129,7 +1129,7 @@ vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vnmsac_vv_i64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m( @@ -1138,7 +1138,7 @@ vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vnmsac_vx_i64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m( @@ -1147,7 +1147,7 @@ vint64m1_t 
test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vnmsac_vv_i64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m( @@ -1156,7 +1156,7 @@ vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vnmsac_vx_i64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m( @@ -1165,7 +1165,7 @@ vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vnmsac_vv_i64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m( @@ -1174,7 +1174,7 @@ vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vnmsac_vx_i64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m( @@ -1183,7 +1183,7 @@ vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vnmsac_vv_i64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vnmsac_vx_i64m8_m( @@ -1192,7 +1192,7 @@ vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vnmsac_vx_i64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m( @@ -1201,7 +1201,7 @@ vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vnmsac_vv_u8mf8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m( @@ -1210,7 +1210,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vnmsac_vx_u8mf8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m( @@ -1219,7 +1219,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vnmsac_vv_u8mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m( @@ -1228,7 +1228,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vnmsac_vx_u8mf4_m(mask, vd, rs1, vs2, vl); + return 
__riscv_vnmsac_vx_u8mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m( @@ -1237,7 +1237,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vnmsac_vv_u8mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m( @@ -1246,7 +1246,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vnmsac_vx_u8mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m( @@ -1255,7 +1255,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vnmsac_vv_u8m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m( @@ -1264,7 +1264,7 @@ vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vnmsac_vx_u8m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m( @@ -1273,7 +1273,7 @@ vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return 
vnmsac_vv_u8m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m( @@ -1282,7 +1282,7 @@ vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vnmsac_vx_u8m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m( @@ -1291,7 +1291,7 @@ vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vnmsac_vv_u8m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m( @@ -1300,7 +1300,7 @@ vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vnmsac_vx_u8m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m( @@ -1309,7 +1309,7 @@ vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vnmsac_vv_u8m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m( @@ -1318,7 +1318,7 @@ vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) 
{ - return vnmsac_vx_u8m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m( @@ -1327,7 +1327,7 @@ vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vnmsac_vv_u16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m( @@ -1336,7 +1336,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vnmsac_vx_u16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m( @@ -1345,7 +1345,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vnmsac_vv_u16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m( @@ -1354,7 +1354,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vnmsac_vx_u16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m( @@ -1363,7 +1363,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vnmsac_vv_u16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m( @@ -1372,7 +1372,7 @@ vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vnmsac_vx_u16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m( @@ -1381,7 +1381,7 @@ vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vnmsac_vv_u16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m( @@ -1390,7 +1390,7 @@ vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vnmsac_vx_u16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m( @@ -1399,7 +1399,7 @@ vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vnmsac_vv_u16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m( @@ -1408,7 +1408,7 @@ vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, 
vuint16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vnmsac_vx_u16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m( @@ -1417,7 +1417,7 @@ vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vnmsac_vv_u16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m( @@ -1426,7 +1426,7 @@ vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vnmsac_vx_u16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m( @@ -1435,7 +1435,7 @@ vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vnmsac_vv_u32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m( @@ -1444,7 +1444,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vnmsac_vx_u32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m( @@ -1453,7 +1453,7 @@ 
vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vnmsac_vv_u32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m( @@ -1462,7 +1462,7 @@ vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vnmsac_vx_u32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m( @@ -1471,7 +1471,7 @@ vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vnmsac_vv_u32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m( @@ -1480,7 +1480,7 @@ vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vnmsac_vx_u32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m( @@ -1489,7 +1489,7 @@ vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vnmsac_vv_u32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m4_m(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m( @@ -1498,7 +1498,7 @@ vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vnmsac_vx_u32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m( @@ -1507,7 +1507,7 @@ vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vnmsac_vv_u32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m( @@ -1516,7 +1516,7 @@ vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vnmsac_vx_u32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m( @@ -1525,7 +1525,7 @@ vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vnmsac_vv_u64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m( @@ -1534,7 +1534,7 @@ vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vnmsac_vx_u64m1_m(mask, vd, rs1, vs2, vl); + return 
__riscv_vnmsac_vx_u64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m( @@ -1543,7 +1543,7 @@ vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vnmsac_vv_u64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m( @@ -1552,7 +1552,7 @@ vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vnmsac_vx_u64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m( @@ -1561,7 +1561,7 @@ vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vnmsac_vv_u64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m( @@ -1570,7 +1570,7 @@ vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vnmsac_vx_u64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m( @@ -1579,7 +1579,7 @@ vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - 
return vnmsac_vv_u64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m( @@ -1588,6 +1588,6 @@ vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vnmsac_vx_u64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsub.c index f19c55926c22..679eafedf765 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnmsub.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vnmsub_vv_i8mf8(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8( @@ -22,7 +22,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vnmsub_vx_i8mf8(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4( @@ -31,7 +31,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vnmsub_vv_i8mf4(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4( @@ -40,7 +40,7 @@ vint8mf4_t 
test_vnmsub_vv_i8mf4(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vnmsub_vx_i8mf4(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2( @@ -49,7 +49,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vnmsub_vv_i8mf2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2( @@ -58,7 +58,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vnmsub_vx_i8mf2(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1( @@ -67,7 +67,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vnmsub_vv_i8m1(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1( @@ -76,7 +76,7 @@ vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vnmsub_vx_i8m1(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2( @@ -85,7 +85,7 @@ vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, 
size_t vl) { - return vnmsub_vv_i8m2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2( @@ -94,7 +94,7 @@ vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vnmsub_vx_i8m2(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4( @@ -103,7 +103,7 @@ vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vnmsub_vv_i8m4(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4( @@ -112,7 +112,7 @@ vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vnmsub_vx_i8m4(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8( @@ -121,7 +121,7 @@ vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vnmsub_vv_i8m8(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8( @@ -130,7 +130,7 @@ vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vnmsub_vx_i8m8(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4( @@ -139,7 +139,7 @@ vint8m8_t 
test_vnmsub_vx_i8m8(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vnmsub_vv_i16mf4(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4( @@ -148,7 +148,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vnmsub_vx_i16mf4(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2( @@ -157,7 +157,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vnmsub_vv_i16mf2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2( @@ -166,7 +166,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vnmsub_vx_i16mf2(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1( @@ -175,7 +175,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vnmsub_vv_i16m1(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1( @@ -184,7 +184,7 @@ vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vnmsub_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vnmsub_vx_i16m1(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2( @@ -193,7 +193,7 @@ vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vnmsub_vv_i16m2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2( @@ -202,7 +202,7 @@ vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vnmsub_vx_i16m2(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4( @@ -211,7 +211,7 @@ vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vnmsub_vv_i16m4(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4( @@ -220,7 +220,7 @@ vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vnmsub_vx_i16m4(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8( @@ -229,7 +229,7 @@ vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vnmsub_vv_i16m8(vd, vs1, vs2, vl); + return 
__riscv_vnmsub_vv_i16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8( @@ -238,7 +238,7 @@ vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vnmsub_vx_i16m8(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2( @@ -247,7 +247,7 @@ vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vnmsub_vv_i32mf2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2( @@ -256,7 +256,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vnmsub_vx_i32mf2(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1( @@ -265,7 +265,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vnmsub_vv_i32m1(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1( @@ -274,7 +274,7 @@ vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vnmsub_vx_i32m1(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2( @@ -283,7 +283,7 @@ vint32m1_t 
test_vnmsub_vx_i32m1(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vnmsub_vv_i32m2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2( @@ -292,7 +292,7 @@ vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vnmsub_vx_i32m2(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4( @@ -301,7 +301,7 @@ vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vnmsub_vv_i32m4(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4( @@ -310,7 +310,7 @@ vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vnmsub_vx_i32m4(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8( @@ -319,7 +319,7 @@ vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vnmsub_vv_i32m8(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8( @@ -328,7 +328,7 @@ vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t 
vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vnmsub_vx_i32m8(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1( @@ -337,7 +337,7 @@ vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vnmsub_vv_i64m1(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1( @@ -346,7 +346,7 @@ vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vnmsub_vx_i64m1(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2( @@ -355,7 +355,7 @@ vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vnmsub_vv_i64m2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2( @@ -364,7 +364,7 @@ vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vnmsub_vx_i64m2(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4( @@ -373,7 +373,7 @@ vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vnmsub_vv_i64m4(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m4(vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4( @@ -382,7 +382,7 @@ vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vnmsub_vx_i64m4(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8( @@ -391,7 +391,7 @@ vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vnmsub_vv_i64m8(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8( @@ -400,7 +400,7 @@ vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vnmsub_vx_i64m8(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8( @@ -409,7 +409,7 @@ vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vnmsub_vv_u8mf8(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8( @@ -418,7 +418,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vnmsub_vx_u8mf8(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4( @@ -427,7 +427,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, s // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vnmsub_vv_u8mf4(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4( @@ -436,7 +436,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vnmsub_vx_u8mf4(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2( @@ -445,7 +445,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vnmsub_vv_u8mf2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2( @@ -454,7 +454,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vnmsub_vx_u8mf2(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1( @@ -463,7 +463,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vnmsub_vv_u8m1(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1( @@ -472,7 +472,7 @@ vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return 
vnmsub_vx_u8m1(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2( @@ -481,7 +481,7 @@ vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vnmsub_vv_u8m2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2( @@ -490,7 +490,7 @@ vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vnmsub_vx_u8m2(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4( @@ -499,7 +499,7 @@ vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vnmsub_vv_u8m4(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4( @@ -508,7 +508,7 @@ vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vnmsub_vx_u8m4(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8( @@ -517,7 +517,7 @@ vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vnmsub_vv_u8m8(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8( @@ -526,7 +526,7 @@ vuint8m8_t 
test_vnmsub_vv_u8m8(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vnmsub_vx_u8m8(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4( @@ -535,7 +535,7 @@ vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vnmsub_vv_u16mf4(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4( @@ -544,7 +544,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vnmsub_vx_u16mf4(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2( @@ -553,7 +553,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vnmsub_vv_u16mf2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2( @@ -562,7 +562,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vnmsub_vx_u16mf2(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1( @@ -571,7 +571,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vnmsub_vv_u16m1(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1( @@ -580,7 +580,7 @@ vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vnmsub_vx_u16m1(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2( @@ -589,7 +589,7 @@ vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vnmsub_vv_u16m2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2( @@ -598,7 +598,7 @@ vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vnmsub_vx_u16m2(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4( @@ -607,7 +607,7 @@ vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vnmsub_vv_u16m4(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4( @@ -616,7 +616,7 @@ vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vnmsub_vx_u16m4(vd, rs1, vs2, 
vl); + return __riscv_vnmsub_vx_u16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8( @@ -625,7 +625,7 @@ vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vnmsub_vv_u16m8(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8( @@ -634,7 +634,7 @@ vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vnmsub_vx_u16m8(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2( @@ -643,7 +643,7 @@ vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vnmsub_vv_u32mf2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2( @@ -652,7 +652,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vnmsub_vx_u32mf2(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1( @@ -661,7 +661,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vnmsub_vv_u32m1(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1( @@ -670,7 
+670,7 @@ vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vnmsub_vx_u32m1(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2( @@ -679,7 +679,7 @@ vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vnmsub_vv_u32m2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2( @@ -688,7 +688,7 @@ vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vnmsub_vx_u32m2(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4( @@ -697,7 +697,7 @@ vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vnmsub_vv_u32m4(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4( @@ -706,7 +706,7 @@ vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vnmsub_vx_u32m4(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8( @@ -715,7 +715,7 @@ vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vnmsub_vv_u32m8(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8( @@ -724,7 +724,7 @@ vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vnmsub_vx_u32m8(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1( @@ -733,7 +733,7 @@ vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vnmsub_vv_u64m1(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1( @@ -742,7 +742,7 @@ vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vnmsub_vx_u64m1(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2( @@ -751,7 +751,7 @@ vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vnmsub_vv_u64m2(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2( @@ -760,7 +760,7 @@ vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vnmsub_vx_u64m2(vd, rs1, vs2, 
vl); + return __riscv_vnmsub_vx_u64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4( @@ -769,7 +769,7 @@ vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vnmsub_vv_u64m4(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4( @@ -778,7 +778,7 @@ vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vnmsub_vx_u64m4(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8( @@ -787,7 +787,7 @@ vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vnmsub_vv_u64m8(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8( @@ -796,7 +796,7 @@ vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vnmsub_vx_u64m8(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m( @@ -805,7 +805,7 @@ vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vnmsub_vv_i8mf8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vnmsub_vx_i8mf8_m( @@ -814,7 +814,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vnmsub_vx_i8mf8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m( @@ -823,7 +823,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vnmsub_vv_i8mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m( @@ -832,7 +832,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vnmsub_vx_i8mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m( @@ -841,7 +841,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vnmsub_vv_i8mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m( @@ -850,7 +850,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vnmsub_vx_i8mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf2_m(mask, vd, rs1, vs2, vl); 
} // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m( @@ -859,7 +859,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vnmsub_vv_i8m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m( @@ -868,7 +868,7 @@ vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vnmsub_vx_i8m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m( @@ -877,7 +877,7 @@ vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vnmsub_vv_i8m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m( @@ -886,7 +886,7 @@ vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vnmsub_vx_i8m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m( @@ -895,7 +895,7 @@ vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vnmsub_vv_i8m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m4_m(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m( @@ -904,7 +904,7 @@ vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vnmsub_vx_i8m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m( @@ -913,7 +913,7 @@ vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vnmsub_vv_i8m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m( @@ -922,7 +922,7 @@ vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vnmsub_vx_i8m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m( @@ -931,7 +931,7 @@ vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vnmsub_vv_i16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m( @@ -940,7 +940,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vnmsub_vx_i16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16mf4_m(mask, vd, rs1, 
vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m( @@ -949,7 +949,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vnmsub_vv_i16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m( @@ -958,7 +958,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vnmsub_vx_i16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m( @@ -967,7 +967,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vnmsub_vv_i16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m( @@ -976,7 +976,7 @@ vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vnmsub_vx_i16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m( @@ -985,7 +985,7 @@ vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vnmsub_vv_i16m2_m(mask, vd, vs1, vs2, vl); + 
return __riscv_vnmsub_vv_i16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m( @@ -994,7 +994,7 @@ vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vnmsub_vx_i16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m( @@ -1003,7 +1003,7 @@ vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vnmsub_vv_i16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m( @@ -1012,7 +1012,7 @@ vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vnmsub_vx_i16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m( @@ -1021,7 +1021,7 @@ vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vnmsub_vv_i16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m( @@ -1030,7 +1030,7 @@ vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return 
vnmsub_vx_i16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m( @@ -1039,7 +1039,7 @@ vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vnmsub_vv_i32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m( @@ -1048,7 +1048,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vnmsub_vx_i32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m( @@ -1057,7 +1057,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vnmsub_vv_i32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m( @@ -1066,7 +1066,7 @@ vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vnmsub_vx_i32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m( @@ -1075,7 +1075,7 @@ vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, 
vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vnmsub_vv_i32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m( @@ -1084,7 +1084,7 @@ vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vnmsub_vx_i32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m( @@ -1093,7 +1093,7 @@ vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vnmsub_vv_i32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m( @@ -1102,7 +1102,7 @@ vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vnmsub_vx_i32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m( @@ -1111,7 +1111,7 @@ vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vnmsub_vv_i32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m( @@ -1120,7 +1120,7 @@ vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vnmsub_vx_i32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m( @@ -1129,7 +1129,7 @@ vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vnmsub_vv_i64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m( @@ -1138,7 +1138,7 @@ vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vnmsub_vx_i64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m( @@ -1147,7 +1147,7 @@ vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vnmsub_vv_i64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m( @@ -1156,7 +1156,7 @@ vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vnmsub_vx_i64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m( @@ -1165,7 +1165,7 @@ vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vi // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vnmsub_vv_i64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m( @@ -1174,7 +1174,7 @@ vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vnmsub_vx_i64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m( @@ -1183,7 +1183,7 @@ vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vnmsub_vv_i64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m( @@ -1192,7 +1192,7 @@ vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vnmsub_vx_i64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m( @@ -1201,7 +1201,7 @@ vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vnmsub_vv_u8mf8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m( @@ -1210,7 +1210,7 @@ vuint8mf8_t 
test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vnmsub_vx_u8mf8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m( @@ -1219,7 +1219,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vnmsub_vv_u8mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m( @@ -1228,7 +1228,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vnmsub_vx_u8mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m( @@ -1237,7 +1237,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vnmsub_vv_u8mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m( @@ -1246,7 +1246,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vnmsub_vx_u8mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vnmsub_vv_u8m1_m( @@ -1255,7 +1255,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vnmsub_vv_u8m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m( @@ -1264,7 +1264,7 @@ vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vnmsub_vx_u8m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m( @@ -1273,7 +1273,7 @@ vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vnmsub_vv_u8m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m( @@ -1282,7 +1282,7 @@ vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vnmsub_vx_u8m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m( @@ -1291,7 +1291,7 @@ vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vnmsub_vv_u8m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m4_m(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m( @@ -1300,7 +1300,7 @@ vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vnmsub_vx_u8m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m( @@ -1309,7 +1309,7 @@ vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vnmsub_vv_u8m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m( @@ -1318,7 +1318,7 @@ vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vnmsub_vx_u8m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m( @@ -1327,7 +1327,7 @@ vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vnmsub_vv_u16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m( @@ -1336,7 +1336,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vnmsub_vx_u16mf4_m(mask, vd, rs1, vs2, vl); + return 
__riscv_vnmsub_vx_u16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m( @@ -1345,7 +1345,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vnmsub_vv_u16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m( @@ -1354,7 +1354,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vnmsub_vx_u16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m( @@ -1363,7 +1363,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vnmsub_vv_u16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m( @@ -1372,7 +1372,7 @@ vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vnmsub_vx_u16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m( @@ -1381,7 +1381,7 @@ vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, 
size_t vl) { - return vnmsub_vv_u16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m( @@ -1390,7 +1390,7 @@ vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vnmsub_vx_u16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m( @@ -1399,7 +1399,7 @@ vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vnmsub_vv_u16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m( @@ -1408,7 +1408,7 @@ vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vnmsub_vx_u16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m( @@ -1417,7 +1417,7 @@ vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vnmsub_vv_u16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m( @@ -1426,7 +1426,7 @@ vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t 
mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vnmsub_vx_u16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m( @@ -1435,7 +1435,7 @@ vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vnmsub_vv_u32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m( @@ -1444,7 +1444,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vnmsub_vx_u32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m( @@ -1453,7 +1453,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vnmsub_vv_u32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m( @@ -1462,7 +1462,7 @@ vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vnmsub_vx_u32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m( @@ -1471,7 +1471,7 @@ vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vnmsub_vv_u32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m( @@ -1480,7 +1480,7 @@ vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vnmsub_vx_u32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m( @@ -1489,7 +1489,7 @@ vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vnmsub_vv_u32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m( @@ -1498,7 +1498,7 @@ vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vnmsub_vx_u32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m( @@ -1507,7 +1507,7 @@ vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vnmsub_vv_u32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m( @@ -1516,7 +1516,7 @@ vuint32m8_t 
test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vnmsub_vx_u32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m( @@ -1525,7 +1525,7 @@ vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vnmsub_vv_u64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m( @@ -1534,7 +1534,7 @@ vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vnmsub_vx_u64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m( @@ -1543,7 +1543,7 @@ vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vnmsub_vv_u64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m( @@ -1552,7 +1552,7 @@ vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vnmsub_vx_u64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vnmsub_vv_u64m4_m( @@ -1561,7 +1561,7 @@ vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vnmsub_vv_u64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m( @@ -1570,7 +1570,7 @@ vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vnmsub_vx_u64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m( @@ -1579,7 +1579,7 @@ vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vnmsub_vv_u64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m( @@ -1588,6 +1588,6 @@ vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vnmsub_vx_u64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnot.c index 9c432918a6a8..8b9115596856 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnot.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnot.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnot_v_i8mf8(vint8mf8_t op1, size_t vl) { - return vnot_v_i8mf8(op1, vl); + return __riscv_vnot_v_i8mf8(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf4( @@ -21,7 +21,7 @@ vint8mf8_t test_vnot_v_i8mf8(vint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnot_v_i8mf4(vint8mf4_t op1, size_t vl) { - return vnot_v_i8mf4(op1, vl); + return __riscv_vnot_v_i8mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf2( @@ -30,7 +30,7 @@ vint8mf4_t test_vnot_v_i8mf4(vint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnot_v_i8mf2(vint8mf2_t op1, size_t vl) { - return vnot_v_i8mf2(op1, vl); + return __riscv_vnot_v_i8mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m1( @@ -39,7 +39,7 @@ vint8mf2_t test_vnot_v_i8mf2(vint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnot_v_i8m1(vint8m1_t op1, size_t vl) { - return vnot_v_i8m1(op1, vl); + return __riscv_vnot_v_i8m1(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m2( @@ -48,7 +48,7 @@ vint8m1_t test_vnot_v_i8m1(vint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnot_v_i8m2(vint8m2_t op1, size_t vl) { - return vnot_v_i8m2(op1, vl); + return __riscv_vnot_v_i8m2(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m4( @@ -57,7 +57,7 @@ vint8m2_t test_vnot_v_i8m2(vint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnot_v_i8m4(vint8m4_t op1, size_t vl) { - return vnot_v_i8m4(op1, vl); + return __riscv_vnot_v_i8m4(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m8( @@ -66,7 +66,7 @@ vint8m4_t test_vnot_v_i8m4(vint8m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8(vint8m8_t op1, size_t vl) { - return vnot_v_i8m8(op1, vl); + return __riscv_vnot_v_i8m8(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf4( @@ 
-75,7 +75,7 @@ vint8m8_t test_vnot_v_i8m8(vint8m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4(vint16mf4_t op1, size_t vl) { - return vnot_v_i16mf4(op1, vl); + return __riscv_vnot_v_i16mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf2( @@ -84,7 +84,7 @@ vint16mf4_t test_vnot_v_i16mf4(vint16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2(vint16mf2_t op1, size_t vl) { - return vnot_v_i16mf2(op1, vl); + return __riscv_vnot_v_i16mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m1( @@ -93,7 +93,7 @@ vint16mf2_t test_vnot_v_i16mf2(vint16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnot_v_i16m1(vint16m1_t op1, size_t vl) { - return vnot_v_i16m1(op1, vl); + return __riscv_vnot_v_i16m1(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m2( @@ -102,7 +102,7 @@ vint16m1_t test_vnot_v_i16m1(vint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2(vint16m2_t op1, size_t vl) { - return vnot_v_i16m2(op1, vl); + return __riscv_vnot_v_i16m2(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m4( @@ -111,7 +111,7 @@ vint16m2_t test_vnot_v_i16m2(vint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4(vint16m4_t op1, size_t vl) { - return vnot_v_i16m4(op1, vl); + return __riscv_vnot_v_i16m4(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m8( @@ -120,7 +120,7 @@ vint16m4_t test_vnot_v_i16m4(vint16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8(vint16m8_t op1, size_t vl) { - return vnot_v_i16m8(op1, vl); + return __riscv_vnot_v_i16m8(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32mf2( @@ -129,7 +129,7 @@ vint16m8_t test_vnot_v_i16m8(vint16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnot_v_i32mf2(vint32mf2_t op1, size_t vl) { - return vnot_v_i32mf2(op1, vl); + return __riscv_vnot_v_i32mf2(op1, vl); } // CHECK-RV64-LABEL: 
@test_vnot_v_i32m1( @@ -138,7 +138,7 @@ vint32mf2_t test_vnot_v_i32mf2(vint32mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1(vint32m1_t op1, size_t vl) { - return vnot_v_i32m1(op1, vl); + return __riscv_vnot_v_i32m1(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m2( @@ -147,7 +147,7 @@ vint32m1_t test_vnot_v_i32m1(vint32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnot_v_i32m2(vint32m2_t op1, size_t vl) { - return vnot_v_i32m2(op1, vl); + return __riscv_vnot_v_i32m2(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m4( @@ -156,7 +156,7 @@ vint32m2_t test_vnot_v_i32m2(vint32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4(vint32m4_t op1, size_t vl) { - return vnot_v_i32m4(op1, vl); + return __riscv_vnot_v_i32m4(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m8( @@ -165,7 +165,7 @@ vint32m4_t test_vnot_v_i32m4(vint32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8(vint32m8_t op1, size_t vl) { - return vnot_v_i32m8(op1, vl); + return __riscv_vnot_v_i32m8(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m1( @@ -174,7 +174,7 @@ vint32m8_t test_vnot_v_i32m8(vint32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnot_v_i64m1(vint64m1_t op1, size_t vl) { - return vnot_v_i64m1(op1, vl); + return __riscv_vnot_v_i64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m2( @@ -183,7 +183,7 @@ vint64m1_t test_vnot_v_i64m1(vint64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2(vint64m2_t op1, size_t vl) { - return vnot_v_i64m2(op1, vl); + return __riscv_vnot_v_i64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m4( @@ -192,7 +192,7 @@ vint64m2_t test_vnot_v_i64m2(vint64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4(vint64m4_t op1, size_t vl) { - return vnot_v_i64m4(op1, vl); + return __riscv_vnot_v_i64m4(op1, vl); } // 
CHECK-RV64-LABEL: @test_vnot_v_i64m8( @@ -201,7 +201,7 @@ vint64m4_t test_vnot_v_i64m4(vint64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8(vint64m8_t op1, size_t vl) { - return vnot_v_i64m8(op1, vl); + return __riscv_vnot_v_i64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf8( @@ -210,7 +210,7 @@ vint64m8_t test_vnot_v_i64m8(vint64m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnot_v_u8mf8(vuint8mf8_t op1, size_t vl) { - return vnot_v_u8mf8(op1, vl); + return __riscv_vnot_v_u8mf8(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf4( @@ -219,7 +219,7 @@ vuint8mf8_t test_vnot_v_u8mf8(vuint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnot_v_u8mf4(vuint8mf4_t op1, size_t vl) { - return vnot_v_u8mf4(op1, vl); + return __riscv_vnot_v_u8mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf2( @@ -228,7 +228,7 @@ vuint8mf4_t test_vnot_v_u8mf4(vuint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnot_v_u8mf2(vuint8mf2_t op1, size_t vl) { - return vnot_v_u8mf2(op1, vl); + return __riscv_vnot_v_u8mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m1( @@ -237,7 +237,7 @@ vuint8mf2_t test_vnot_v_u8mf2(vuint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1(vuint8m1_t op1, size_t vl) { - return vnot_v_u8m1(op1, vl); + return __riscv_vnot_v_u8m1(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m2( @@ -246,7 +246,7 @@ vuint8m1_t test_vnot_v_u8m1(vuint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2(vuint8m2_t op1, size_t vl) { - return vnot_v_u8m2(op1, vl); + return __riscv_vnot_v_u8m2(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m4( @@ -255,7 +255,7 @@ vuint8m2_t test_vnot_v_u8m2(vuint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnot_v_u8m4(vuint8m4_t op1, size_t vl) { - return vnot_v_u8m4(op1, vl); + return __riscv_vnot_v_u8m4(op1, 
vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m8( @@ -264,7 +264,7 @@ vuint8m4_t test_vnot_v_u8m4(vuint8m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8(vuint8m8_t op1, size_t vl) { - return vnot_v_u8m8(op1, vl); + return __riscv_vnot_v_u8m8(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf4( @@ -273,7 +273,7 @@ vuint8m8_t test_vnot_v_u8m8(vuint8m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnot_v_u16mf4(vuint16mf4_t op1, size_t vl) { - return vnot_v_u16mf4(op1, vl); + return __riscv_vnot_v_u16mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf2( @@ -282,7 +282,7 @@ vuint16mf4_t test_vnot_v_u16mf4(vuint16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2(vuint16mf2_t op1, size_t vl) { - return vnot_v_u16mf2(op1, vl); + return __riscv_vnot_v_u16mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m1( @@ -291,7 +291,7 @@ vuint16mf2_t test_vnot_v_u16mf2(vuint16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1(vuint16m1_t op1, size_t vl) { - return vnot_v_u16m1(op1, vl); + return __riscv_vnot_v_u16m1(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m2( @@ -300,7 +300,7 @@ vuint16m1_t test_vnot_v_u16m1(vuint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnot_v_u16m2(vuint16m2_t op1, size_t vl) { - return vnot_v_u16m2(op1, vl); + return __riscv_vnot_v_u16m2(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m4( @@ -309,7 +309,7 @@ vuint16m2_t test_vnot_v_u16m2(vuint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnot_v_u16m4(vuint16m4_t op1, size_t vl) { - return vnot_v_u16m4(op1, vl); + return __riscv_vnot_v_u16m4(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m8( @@ -318,7 +318,7 @@ vuint16m4_t test_vnot_v_u16m4(vuint16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8(vuint16m8_t op1, size_t vl) { - return 
vnot_v_u16m8(op1, vl); + return __riscv_vnot_v_u16m8(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32mf2( @@ -327,7 +327,7 @@ vuint16m8_t test_vnot_v_u16m8(vuint16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2(vuint32mf2_t op1, size_t vl) { - return vnot_v_u32mf2(op1, vl); + return __riscv_vnot_v_u32mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m1( @@ -336,7 +336,7 @@ vuint32mf2_t test_vnot_v_u32mf2(vuint32mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1(vuint32m1_t op1, size_t vl) { - return vnot_v_u32m1(op1, vl); + return __riscv_vnot_v_u32m1(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m2( @@ -345,7 +345,7 @@ vuint32m1_t test_vnot_v_u32m1(vuint32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2(vuint32m2_t op1, size_t vl) { - return vnot_v_u32m2(op1, vl); + return __riscv_vnot_v_u32m2(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m4( @@ -354,7 +354,7 @@ vuint32m2_t test_vnot_v_u32m2(vuint32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnot_v_u32m4(vuint32m4_t op1, size_t vl) { - return vnot_v_u32m4(op1, vl); + return __riscv_vnot_v_u32m4(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m8( @@ -363,7 +363,7 @@ vuint32m4_t test_vnot_v_u32m4(vuint32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8(vuint32m8_t op1, size_t vl) { - return vnot_v_u32m8(op1, vl); + return __riscv_vnot_v_u32m8(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m1( @@ -372,7 +372,7 @@ vuint32m8_t test_vnot_v_u32m8(vuint32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnot_v_u64m1(vuint64m1_t op1, size_t vl) { - return vnot_v_u64m1(op1, vl); + return __riscv_vnot_v_u64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m2( @@ -381,7 +381,7 @@ vuint64m1_t test_vnot_v_u64m1(vuint64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vnot_v_u64m2(vuint64m2_t op1, size_t vl) { - return vnot_v_u64m2(op1, vl); + return __riscv_vnot_v_u64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m4( @@ -390,7 +390,7 @@ vuint64m2_t test_vnot_v_u64m2(vuint64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4(vuint64m4_t op1, size_t vl) { - return vnot_v_u64m4(op1, vl); + return __riscv_vnot_v_u64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m8( @@ -399,7 +399,7 @@ vuint64m4_t test_vnot_v_u64m4(vuint64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnot_v_u64m8(vuint64m8_t op1, size_t vl) { - return vnot_v_u64m8(op1, vl); + return __riscv_vnot_v_u64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vnot_v_u64m8(vuint64m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnot_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return vnot_v_i8mf8_m(mask, op1, vl); + return __riscv_vnot_v_i8mf8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf4_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vnot_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnot_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return vnot_v_i8mf4_m(mask, op1, vl); + return __riscv_vnot_v_i8mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf2_m( @@ -426,7 +426,7 @@ vint8mf4_t test_vnot_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnot_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return vnot_v_i8mf2_m(mask, op1, vl); + return __riscv_vnot_v_i8mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m1_m( @@ -435,7 +435,7 @@ vint8mf2_t test_vnot_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnot_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) { - return vnot_v_i8m1_m(mask, op1, vl); + 
return __riscv_vnot_v_i8m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m2_m( @@ -444,7 +444,7 @@ vint8m1_t test_vnot_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnot_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) { - return vnot_v_i8m2_m(mask, op1, vl); + return __riscv_vnot_v_i8m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m4_m( @@ -453,7 +453,7 @@ vint8m2_t test_vnot_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnot_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) { - return vnot_v_i8m4_m(mask, op1, vl); + return __riscv_vnot_v_i8m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m8_m( @@ -462,7 +462,7 @@ vint8m4_t test_vnot_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) { - return vnot_v_i8m8_m(mask, op1, vl); + return __riscv_vnot_v_i8m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf4_m( @@ -471,7 +471,7 @@ vint8m8_t test_vnot_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { - return vnot_v_i16mf4_m(mask, op1, vl); + return __riscv_vnot_v_i16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf2_m( @@ -480,7 +480,7 @@ vint16mf4_t test_vnot_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { - return vnot_v_i16mf2_m(mask, op1, vl); + return __riscv_vnot_v_i16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m1_m( @@ -489,7 +489,7 @@ vint16mf2_t test_vnot_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnot_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) { - return 
vnot_v_i16m1_m(mask, op1, vl); + return __riscv_vnot_v_i16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m2_m( @@ -498,7 +498,7 @@ vint16m1_t test_vnot_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) { - return vnot_v_i16m2_m(mask, op1, vl); + return __riscv_vnot_v_i16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m4_m( @@ -507,7 +507,7 @@ vint16m2_t test_vnot_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) { - return vnot_v_i16m4_m(mask, op1, vl); + return __riscv_vnot_v_i16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m8_m( @@ -516,7 +516,7 @@ vint16m4_t test_vnot_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) { - return vnot_v_i16m8_m(mask, op1, vl); + return __riscv_vnot_v_i16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32mf2_m( @@ -525,7 +525,7 @@ vint16m8_t test_vnot_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnot_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { - return vnot_v_i32mf2_m(mask, op1, vl); + return __riscv_vnot_v_i32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m1_m( @@ -534,7 +534,7 @@ vint32mf2_t test_vnot_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) { - return vnot_v_i32m1_m(mask, op1, vl); + return __riscv_vnot_v_i32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m2_m( @@ -543,7 +543,7 @@ vint32m1_t test_vnot_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vnot_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) { - return vnot_v_i32m2_m(mask, op1, vl); + return __riscv_vnot_v_i32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m4_m( @@ -552,7 +552,7 @@ vint32m2_t test_vnot_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) { - return vnot_v_i32m4_m(mask, op1, vl); + return __riscv_vnot_v_i32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m8_m( @@ -561,7 +561,7 @@ vint32m4_t test_vnot_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) { - return vnot_v_i32m8_m(mask, op1, vl); + return __riscv_vnot_v_i32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m1_m( @@ -570,7 +570,7 @@ vint32m8_t test_vnot_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnot_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) { - return vnot_v_i64m1_m(mask, op1, vl); + return __riscv_vnot_v_i64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m2_m( @@ -579,7 +579,7 @@ vint64m1_t test_vnot_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) { - return vnot_v_i64m2_m(mask, op1, vl); + return __riscv_vnot_v_i64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m4_m( @@ -588,7 +588,7 @@ vint64m2_t test_vnot_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) { - return vnot_v_i64m4_m(mask, op1, vl); + return __riscv_vnot_v_i64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m8_m( @@ -597,7 +597,7 @@ vint64m4_t test_vnot_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) { - return vnot_v_i64m8_m(mask, op1, vl); + return __riscv_vnot_v_i64m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf8_m( @@ -606,7 +606,7 @@ vint64m8_t test_vnot_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnot_v_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { - return vnot_v_u8mf8_m(mask, op1, vl); + return __riscv_vnot_v_u8mf8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf4_m( @@ -615,7 +615,7 @@ vuint8mf8_t test_vnot_v_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnot_v_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { - return vnot_v_u8mf4_m(mask, op1, vl); + return __riscv_vnot_v_u8mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf2_m( @@ -624,7 +624,7 @@ vuint8mf4_t test_vnot_v_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnot_v_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { - return vnot_v_u8mf2_m(mask, op1, vl); + return __riscv_vnot_v_u8mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m1_m( @@ -633,7 +633,7 @@ vuint8mf2_t test_vnot_v_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { - return vnot_v_u8m1_m(mask, op1, vl); + return __riscv_vnot_v_u8m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m2_m( @@ -642,7 +642,7 @@ vuint8m1_t test_vnot_v_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { - return vnot_v_u8m2_m(mask, op1, vl); + return __riscv_vnot_v_u8m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m4_m( @@ -651,7 +651,7 @@ vuint8m2_t 
test_vnot_v_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnot_v_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t vl) { - return vnot_v_u8m4_m(mask, op1, vl); + return __riscv_vnot_v_u8m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m8_m( @@ -660,7 +660,7 @@ vuint8m4_t test_vnot_v_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t vl) { - return vnot_v_u8m8_m(mask, op1, vl); + return __riscv_vnot_v_u8m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf4_m( @@ -669,7 +669,7 @@ vuint8m8_t test_vnot_v_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnot_v_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { - return vnot_v_u16mf4_m(mask, op1, vl); + return __riscv_vnot_v_u16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf2_m( @@ -678,7 +678,7 @@ vuint16mf4_t test_vnot_v_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { - return vnot_v_u16mf2_m(mask, op1, vl); + return __riscv_vnot_v_u16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m1_m( @@ -687,7 +687,7 @@ vuint16mf2_t test_vnot_v_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { - return vnot_v_u16m1_m(mask, op1, vl); + return __riscv_vnot_v_u16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m2_m( @@ -696,7 +696,7 @@ vuint16m1_t test_vnot_v_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnot_v_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { - return vnot_v_u16m2_m(mask, op1, vl); + return __riscv_vnot_v_u16m2_m(mask, op1, vl); } // 
CHECK-RV64-LABEL: @test_vnot_v_u16m4_m( @@ -705,7 +705,7 @@ vuint16m2_t test_vnot_v_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnot_v_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t vl) { - return vnot_v_u16m4_m(mask, op1, vl); + return __riscv_vnot_v_u16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m8_m( @@ -714,7 +714,7 @@ vuint16m4_t test_vnot_v_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t vl) { - return vnot_v_u16m8_m(mask, op1, vl); + return __riscv_vnot_v_u16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32mf2_m( @@ -723,7 +723,7 @@ vuint16m8_t test_vnot_v_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) { - return vnot_v_u32mf2_m(mask, op1, vl); + return __riscv_vnot_v_u32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m1_m( @@ -732,7 +732,7 @@ vuint32mf2_t test_vnot_v_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t vl) { - return vnot_v_u32m1_m(mask, op1, vl); + return __riscv_vnot_v_u32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m2_m( @@ -741,7 +741,7 @@ vuint32m1_t test_vnot_v_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t vl) { - return vnot_v_u32m2_m(mask, op1, vl); + return __riscv_vnot_v_u32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m4_m( @@ -750,7 +750,7 @@ vuint32m2_t test_vnot_v_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnot_v_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t vl) { - return 
vnot_v_u32m4_m(mask, op1, vl); + return __riscv_vnot_v_u32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m8_m( @@ -759,7 +759,7 @@ vuint32m4_t test_vnot_v_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t vl) { - return vnot_v_u32m8_m(mask, op1, vl); + return __riscv_vnot_v_u32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m1_m( @@ -768,7 +768,7 @@ vuint32m8_t test_vnot_v_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnot_v_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t vl) { - return vnot_v_u64m1_m(mask, op1, vl); + return __riscv_vnot_v_u64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m2_m( @@ -777,7 +777,7 @@ vuint64m1_t test_vnot_v_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnot_v_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t vl) { - return vnot_v_u64m2_m(mask, op1, vl); + return __riscv_vnot_v_u64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m4_m( @@ -786,7 +786,7 @@ vuint64m2_t test_vnot_v_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t vl) { - return vnot_v_u64m4_m(mask, op1, vl); + return __riscv_vnot_v_u64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m8_m( @@ -795,6 +795,6 @@ vuint64m4_t test_vnot_v_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnot_v_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t vl) { - return vnot_v_u64m8_m(mask, op1, vl); + return __riscv_vnot_v_u64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsra.c index 
0bb7077dbf81..cf22c2c44700 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsra.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsra_wv_i8mf8(op1, shift, vl); + return __riscv_vnsra_wv_i8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf8(op1, shift, vl); + return __riscv_vnsra_wx_i8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsra_wv_i8mf4(op1, shift, vl); + return __riscv_vnsra_wv_i8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf4(op1, shift, vl); + return __riscv_vnsra_wx_i8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsra_wv_i8mf2(op1, shift, vl); + return __riscv_vnsra_wv_i8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf2(op1, shift, vl); + return __riscv_vnsra_wx_i8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsra_wv_i8m1(op1, shift, vl); + return __riscv_vnsra_wv_i8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m1(op1, shift, vl); + return __riscv_vnsra_wx_i8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsra_wv_i8m2(op1, shift, vl); + return __riscv_vnsra_wv_i8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m2(op1, shift, vl); + return __riscv_vnsra_wx_i8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsra_wv_i8m4(op1, shift, vl); + return __riscv_vnsra_wv_i8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m4(op1, shift, vl); + return __riscv_vnsra_wx_i8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4( @@ -120,7 +120,7 @@ vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsra_wv_i16mf4(op1, shift, vl); + return __riscv_vnsra_wv_i16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4( @@ -129,7 +129,7 @@ vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16mf4(op1, shift, vl); + return __riscv_vnsra_wx_i16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2( @@ -138,7 +138,7 @@ vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsra_wv_i16mf2(op1, shift, vl); + return __riscv_vnsra_wv_i16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2( @@ -147,7 +147,7 @@ vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16mf2(op1, shift, vl); + return __riscv_vnsra_wx_i16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1( @@ -156,7 +156,7 @@ vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsra_wv_i16m1(op1, shift, vl); + return __riscv_vnsra_wv_i16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1( @@ -165,7 +165,7 @@ 
vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m1(op1, shift, vl); + return __riscv_vnsra_wx_i16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2( @@ -174,7 +174,7 @@ vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsra_wv_i16m2(op1, shift, vl); + return __riscv_vnsra_wv_i16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2( @@ -183,7 +183,7 @@ vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m2(op1, shift, vl); + return __riscv_vnsra_wx_i16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4( @@ -192,7 +192,7 @@ vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsra_wv_i16m4(op1, shift, vl); + return __riscv_vnsra_wv_i16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4( @@ -201,7 +201,7 @@ vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m4(op1, shift, vl); + return __riscv_vnsra_wx_i16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2( @@ -210,7 +210,7 @@ vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsra_wv_i32mf2(op1, shift, vl); + return __riscv_vnsra_wv_i32mf2(op1, shift, 
vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2( @@ -219,7 +219,7 @@ vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32mf2(op1, shift, vl); + return __riscv_vnsra_wx_i32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1( @@ -228,7 +228,7 @@ vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsra_wv_i32m1(op1, shift, vl); + return __riscv_vnsra_wv_i32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1( @@ -237,7 +237,7 @@ vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m1(op1, shift, vl); + return __riscv_vnsra_wx_i32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2( @@ -246,7 +246,7 @@ vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsra_wv_i32m2(op1, shift, vl); + return __riscv_vnsra_wv_i32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2( @@ -255,7 +255,7 @@ vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m2(op1, shift, vl); + return __riscv_vnsra_wx_i32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4( @@ -264,7 +264,7 @@ vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return 
vnsra_wv_i32m4(op1, shift, vl); + return __riscv_vnsra_wv_i32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4( @@ -273,7 +273,7 @@ vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m4(op1, shift, vl); + return __riscv_vnsra_wx_i32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_m( @@ -282,7 +282,7 @@ vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsra_wv_i8mf8_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_m( @@ -291,7 +291,7 @@ vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf8_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_m( @@ -300,7 +300,7 @@ vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsra_wv_i8mf4_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_m( @@ -309,7 +309,7 @@ vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf4_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vnsra_wv_i8mf2_m( @@ -318,7 +318,7 @@ vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsra_wv_i8mf2_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_m( @@ -327,7 +327,7 @@ vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf2_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_m( @@ -336,7 +336,7 @@ vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsra_wv_i8m1_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_m( @@ -345,7 +345,7 @@ vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m1_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_m( @@ -354,7 +354,7 @@ vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsra_wv_i8m2_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_m( @@ -363,7 +363,7 @@ vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, 
vint16m4_t op1, vuint8m2_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m2_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_m( @@ -372,7 +372,7 @@ vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsra_wv_i8m4_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_m( @@ -381,7 +381,7 @@ vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m4_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_m( @@ -390,7 +390,7 @@ vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsra_wv_i16mf4_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_m( @@ -399,7 +399,7 @@ vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16mf4_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_m( @@ -408,7 +408,7 @@ vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vnsra_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsra_wv_i16mf2_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_m( @@ -417,7 +417,7 @@ vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16mf2_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_m( @@ -426,7 +426,7 @@ vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsra_wv_i16m1_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_m( @@ -435,7 +435,7 @@ vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m1_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_m( @@ -444,7 +444,7 @@ vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsra_wv_i16m2_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_m( @@ -453,7 +453,7 @@ vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, 
size_t vl) { - return vnsra_wx_i16m2_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_m( @@ -462,7 +462,7 @@ vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsra_wv_i16m4_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_m( @@ -471,7 +471,7 @@ vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m4_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_m( @@ -480,7 +480,7 @@ vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsra_wv_i32mf2_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_m( @@ -489,7 +489,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32mf2_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_m( @@ -498,7 +498,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsra_wv_i32m1_m(mask, op1, shift, vl); + 
return __riscv_vnsra_wv_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_m( @@ -507,7 +507,7 @@ vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m1_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_m( @@ -516,7 +516,7 @@ vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsra_wv_i32m2_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_m( @@ -525,7 +525,7 @@ vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m2_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_m( @@ -534,7 +534,7 @@ vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsra_wv_i32m4_m(mask, op1, shift, vl); + return __riscv_vnsra_wv_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_m( @@ -543,6 +543,6 @@ vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m4_m(mask, op1, shift, vl); + return __riscv_vnsra_wx_i32m4_m(mask, op1, shift, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsrl.c index 571b9f0f733f..0506681b1a5b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vnsrl.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsrl_wv_u8mf8(op1, shift, vl); + return __riscv_vnsrl_wv_u8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf8(op1, shift, vl); + return __riscv_vnsrl_wx_u8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsrl_wv_u8mf4(op1, shift, vl); + return __riscv_vnsrl_wv_u8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4( @@ -39,7 +39,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf4(op1, shift, vl); + return __riscv_vnsrl_wx_u8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2( @@ -48,7 +48,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsrl_wv_u8mf2(op1, shift, vl); + return 
__riscv_vnsrl_wv_u8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf2(op1, shift, vl); + return __riscv_vnsrl_wx_u8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsrl_wv_u8m1(op1, shift, vl); + return __riscv_vnsrl_wv_u8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m1(op1, shift, vl); + return __riscv_vnsrl_wx_u8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2( @@ -84,7 +84,7 @@ vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsrl_wv_u8m2(op1, shift, vl); + return __riscv_vnsrl_wv_u8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m2(op1, shift, vl); + return __riscv_vnsrl_wx_u8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - 
return vnsrl_wv_u8m4(op1, shift, vl); + return __riscv_vnsrl_wv_u8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4( @@ -111,7 +111,7 @@ vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m4(op1, shift, vl); + return __riscv_vnsrl_wx_u8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4( @@ -120,7 +120,7 @@ vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsrl_wv_u16mf4(op1, shift, vl); + return __riscv_vnsrl_wv_u16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4( @@ -129,7 +129,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16mf4(op1, shift, vl); + return __riscv_vnsrl_wx_u16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2( @@ -138,7 +138,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsrl_wv_u16mf2(op1, shift, vl); + return __riscv_vnsrl_wv_u16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2( @@ -147,7 +147,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16mf2(op1, shift, vl); + return __riscv_vnsrl_wx_u16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1( @@ -156,7 +156,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsrl_wv_u16m1(op1, shift, vl); + return __riscv_vnsrl_wv_u16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1( @@ -165,7 +165,7 @@ vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m1(op1, shift, vl); + return __riscv_vnsrl_wx_u16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2( @@ -174,7 +174,7 @@ vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsrl_wv_u16m2(op1, shift, vl); + return __riscv_vnsrl_wv_u16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2( @@ -183,7 +183,7 @@ vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m2(op1, shift, vl); + return __riscv_vnsrl_wx_u16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4( @@ -192,7 +192,7 @@ vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsrl_wv_u16m4(op1, shift, vl); + return __riscv_vnsrl_wv_u16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4( @@ -201,7 +201,7 @@ vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m4(op1, shift, vl); + return __riscv_vnsrl_wx_u16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2( @@ -210,7 
+210,7 @@ vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsrl_wv_u32mf2(op1, shift, vl); + return __riscv_vnsrl_wv_u32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2( @@ -219,7 +219,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32mf2(op1, shift, vl); + return __riscv_vnsrl_wx_u32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1( @@ -228,7 +228,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsrl_wv_u32m1(op1, shift, vl); + return __riscv_vnsrl_wv_u32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1( @@ -237,7 +237,7 @@ vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m1(op1, shift, vl); + return __riscv_vnsrl_wx_u32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2( @@ -246,7 +246,7 @@ vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsrl_wv_u32m2(op1, shift, vl); + return __riscv_vnsrl_wv_u32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2( @@ -255,7 +255,7 @@ vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m2(op1, shift, vl); + 
return __riscv_vnsrl_wx_u32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4( @@ -264,7 +264,7 @@ vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsrl_wv_u32m4(op1, shift, vl); + return __riscv_vnsrl_wv_u32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4( @@ -273,7 +273,7 @@ vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m4(op1, shift, vl); + return __riscv_vnsrl_wx_u32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_m( @@ -282,7 +282,7 @@ vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsrl_wv_u8mf8_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_m( @@ -291,7 +291,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf8_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_m( @@ -300,7 +300,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsrl_wv_u8mf4_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_m( @@ -309,7 +309,7 @@ vuint8mf4_t 
test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf4_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_m( @@ -318,7 +318,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsrl_wv_u8mf2_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_m( @@ -327,7 +327,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf2_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_m( @@ -336,7 +336,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsrl_wv_u8m1_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_m( @@ -345,7 +345,7 @@ vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m1_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_m( @@ -354,7 +354,7 @@ vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, si // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsrl_wv_u8m2_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_m( @@ -363,7 +363,7 @@ vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m2_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_m( @@ -372,7 +372,7 @@ vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsrl_wv_u8m4_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_m( @@ -381,7 +381,7 @@ vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m4_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_m( @@ -390,7 +390,7 @@ vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsrl_wv_u16mf4_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_m( @@ -399,7 +399,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t 
mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16mf4_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_m( @@ -408,7 +408,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsrl_wv_u16mf2_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_m( @@ -417,7 +417,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16mf2_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_m( @@ -426,7 +426,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsrl_wv_u16m1_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_m( @@ -435,7 +435,7 @@ vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m1_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_m( @@ -444,7 +444,7 @@ vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - 
return vnsrl_wv_u16m2_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_m( @@ -453,7 +453,7 @@ vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m2_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_m( @@ -462,7 +462,7 @@ vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsrl_wv_u16m4_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_m( @@ -471,7 +471,7 @@ vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m4_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_m( @@ -480,7 +480,7 @@ vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsrl_wv_u32mf2_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_m( @@ -489,7 +489,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32mf2_m(mask, op1, shift, vl); + return 
__riscv_vnsrl_wx_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_m( @@ -498,7 +498,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsrl_wv_u32m1_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_m( @@ -507,7 +507,7 @@ vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m1_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_m( @@ -516,7 +516,7 @@ vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsrl_wv_u32m2_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_m( @@ -525,7 +525,7 @@ vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m2_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_m( @@ -534,7 +534,7 @@ vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsrl_wv_u32m4_m(mask, op1, shift, vl); + return __riscv_vnsrl_wv_u32m4_m(mask, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_m( @@ -543,6 +543,6 @@ vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m4_m(mask, op1, shift, vl); + return __riscv_vnsrl_wx_u32m4_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vor.c index 6a7a52a49a0b..d5358def7a32 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vor.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vor_vv_i8mf8(op1, op2, vl); + return __riscv_vor_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf8(op1, op2, vl); + return __riscv_vor_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vor_vv_i8mf4(op1, op2, vl); + return __riscv_vor_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf4(op1, op2, vl); + return __riscv_vor_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vor_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vor_vv_i8mf2(op1, op2, vl); + return __riscv_vor_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf2(op1, op2, vl); + return __riscv_vor_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vor_vv_i8m1(op1, op2, vl); + return __riscv_vor_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m1(op1, op2, vl); + return __riscv_vor_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vor_vv_i8m2(op1, op2, vl); + return __riscv_vor_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m2(op1, op2, vl); + return __riscv_vor_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vor_vv_i8m4(op1, op2, vl); + return __riscv_vor_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m4(op1, op2, vl); + return __riscv_vor_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vor_vv_i8m8(op1, op2, vl); + return __riscv_vor_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m8(op1, op2, vl); + return __riscv_vor_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vor_vv_i16mf4(op1, op2, vl); + return __riscv_vor_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf4(op1, op2, vl); + return __riscv_vor_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vor_vv_i16mf2(op1, op2, vl); + return __riscv_vor_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf2(op1, op2, vl); + return __riscv_vor_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vor_vv_i16m1(op1, op2, vl); + return __riscv_vor_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m1(op1, op2, vl); + return __riscv_vor_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vor_vv_i16m2(op1, op2, vl); + return __riscv_vor_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m2(op1, op2, vl); + return __riscv_vor_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vor_vv_i16m4(op1, op2, vl); + return __riscv_vor_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m4(op1, op2, vl); + return __riscv_vor_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vor_vv_i16m8(op1, op2, vl); + return __riscv_vor_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m8(op1, op2, vl); + return __riscv_vor_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vor_vv_i32mf2(op1, op2, vl); + return __riscv_vor_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vor_vx_i32mf2(op1, op2, vl); + return __riscv_vor_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vor_vv_i32m1(op1, op2, vl); + return __riscv_vor_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m1(op1, op2, vl); + return __riscv_vor_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vor_vv_i32m2(op1, op2, vl); + return __riscv_vor_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m2(op1, op2, vl); + return __riscv_vor_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vor_vv_i32m4(op1, op2, vl); + return __riscv_vor_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m4(op1, op2, vl); + return __riscv_vor_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, 
vint32m8_t op2, size_t vl) { - return vor_vv_i32m8(op1, op2, vl); + return __riscv_vor_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m8(op1, op2, vl); + return __riscv_vor_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vor_vv_i64m1(op1, op2, vl); + return __riscv_vor_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m1(op1, op2, vl); + return __riscv_vor_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vor_vv_i64m2(op1, op2, vl); + return __riscv_vor_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m2(op1, op2, vl); + return __riscv_vor_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return 
vor_vv_i64m4(op1, op2, vl); + return __riscv_vor_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m4(op1, op2, vl); + return __riscv_vor_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vor_vv_i64m8(op1, op2, vl); + return __riscv_vor_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m8(op1, op2, vl); + return __riscv_vor_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf8( @@ -408,7 +408,7 @@ vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vor_vv_u8mf8(op1, op2, vl); + return __riscv_vor_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf8( @@ -417,7 +417,7 @@ vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf8(op1, op2, vl); + return __riscv_vor_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf4( @@ -426,7 +426,7 @@ vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vor_vv_u8mf4(op1, op2, vl); + 
return __riscv_vor_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf4( @@ -435,7 +435,7 @@ vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf4(op1, op2, vl); + return __riscv_vor_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf2( @@ -444,7 +444,7 @@ vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vor_vv_u8mf2(op1, op2, vl); + return __riscv_vor_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf2( @@ -453,7 +453,7 @@ vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf2(op1, op2, vl); + return __riscv_vor_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m1( @@ -462,7 +462,7 @@ vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vor_vv_u8m1(op1, op2, vl); + return __riscv_vor_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m1( @@ -471,7 +471,7 @@ vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m1(op1, op2, vl); + return __riscv_vor_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m2( @@ -480,7 +480,7 @@ vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vor_vv_u8m2(op1, op2, vl); + return __riscv_vor_vv_u8m2(op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m2( @@ -489,7 +489,7 @@ vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m2(op1, op2, vl); + return __riscv_vor_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m4( @@ -498,7 +498,7 @@ vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vor_vv_u8m4(op1, op2, vl); + return __riscv_vor_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m4( @@ -507,7 +507,7 @@ vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m4(op1, op2, vl); + return __riscv_vor_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m8( @@ -516,7 +516,7 @@ vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vor_vv_u8m8(op1, op2, vl); + return __riscv_vor_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m8( @@ -525,7 +525,7 @@ vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m8(op1, op2, vl); + return __riscv_vor_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf4( @@ -534,7 +534,7 @@ vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vor_vv_u16mf4(op1, op2, vl); + return __riscv_vor_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf4( @@ -543,7 
+543,7 @@ vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16mf4(op1, op2, vl); + return __riscv_vor_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf2( @@ -552,7 +552,7 @@ vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vor_vv_u16mf2(op1, op2, vl); + return __riscv_vor_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf2( @@ -561,7 +561,7 @@ vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16mf2(op1, op2, vl); + return __riscv_vor_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m1( @@ -570,7 +570,7 @@ vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vor_vv_u16m1(op1, op2, vl); + return __riscv_vor_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m1( @@ -579,7 +579,7 @@ vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m1(op1, op2, vl); + return __riscv_vor_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m2( @@ -588,7 +588,7 @@ vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vor_vv_u16m2(op1, op2, vl); + return __riscv_vor_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vor_vx_u16m2( @@ -597,7 +597,7 @@ vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m2(op1, op2, vl); + return __riscv_vor_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m4( @@ -606,7 +606,7 @@ vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vor_vv_u16m4(op1, op2, vl); + return __riscv_vor_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m4( @@ -615,7 +615,7 @@ vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m4(op1, op2, vl); + return __riscv_vor_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m8( @@ -624,7 +624,7 @@ vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vor_vv_u16m8(op1, op2, vl); + return __riscv_vor_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m8( @@ -633,7 +633,7 @@ vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m8(op1, op2, vl); + return __riscv_vor_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32mf2( @@ -642,7 +642,7 @@ vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vor_vv_u32mf2(op1, op2, vl); + return __riscv_vor_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vor_vx_u32mf2( @@ -651,7 +651,7 @@ vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32mf2(op1, op2, vl); + return __riscv_vor_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m1( @@ -660,7 +660,7 @@ vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vor_vv_u32m1(op1, op2, vl); + return __riscv_vor_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m1( @@ -669,7 +669,7 @@ vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m1(op1, op2, vl); + return __riscv_vor_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m2( @@ -678,7 +678,7 @@ vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vor_vv_u32m2(op1, op2, vl); + return __riscv_vor_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m2( @@ -687,7 +687,7 @@ vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m2(op1, op2, vl); + return __riscv_vor_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m4( @@ -696,7 +696,7 @@ vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vor_vv_u32m4(op1, op2, vl); + return __riscv_vor_vv_u32m4(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vor_vx_u32m4( @@ -705,7 +705,7 @@ vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m4(op1, op2, vl); + return __riscv_vor_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m8( @@ -714,7 +714,7 @@ vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vor_vv_u32m8(op1, op2, vl); + return __riscv_vor_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m8( @@ -723,7 +723,7 @@ vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m8(op1, op2, vl); + return __riscv_vor_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m1( @@ -732,7 +732,7 @@ vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vor_vv_u64m1(op1, op2, vl); + return __riscv_vor_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m1( @@ -741,7 +741,7 @@ vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m1(op1, op2, vl); + return __riscv_vor_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m2( @@ -750,7 +750,7 @@ vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vor_vv_u64m2(op1, op2, vl); + return __riscv_vor_vv_u64m2(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vor_vx_u64m2( @@ -759,7 +759,7 @@ vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m2(op1, op2, vl); + return __riscv_vor_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m4( @@ -768,7 +768,7 @@ vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vor_vv_u64m4(op1, op2, vl); + return __riscv_vor_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m4( @@ -777,7 +777,7 @@ vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m4(op1, op2, vl); + return __riscv_vor_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m8( @@ -786,7 +786,7 @@ vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vor_vv_u64m8(op1, op2, vl); + return __riscv_vor_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m8( @@ -795,7 +795,7 @@ vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m8(op1, op2, vl); + return __riscv_vor_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m( @@ -804,7 +804,7 @@ vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vor_vv_i8mf8_m(mask, op1, op2, vl); + return 
__riscv_vor_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m( @@ -813,7 +813,7 @@ vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vor_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m( @@ -822,7 +822,7 @@ vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vor_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vor_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m( @@ -831,7 +831,7 @@ vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vor_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m( @@ -840,7 +840,7 @@ vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vor_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vor_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m( @@ -849,7 +849,7 @@ vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vor_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m1_m( @@ -858,7 +858,7 @@ vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t 
mask, vint8mf2_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vor_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vor_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m1_m( @@ -867,7 +867,7 @@ vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vor_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m2_m( @@ -876,7 +876,7 @@ vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vor_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vor_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m2_m( @@ -885,7 +885,7 @@ vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vor_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m4_m( @@ -894,7 +894,7 @@ vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vor_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vor_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m4_m( @@ -903,7 +903,7 @@ vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return 
vor_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vor_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m8_m( @@ -912,7 +912,7 @@ vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vor_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vor_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m8_m( @@ -921,7 +921,7 @@ vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vor_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m( @@ -930,7 +930,7 @@ vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vor_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vor_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m( @@ -939,7 +939,7 @@ vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vor_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m( @@ -948,7 +948,7 @@ vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vor_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vor_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m( @@ 
-957,7 +957,7 @@ vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vor_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m1_m( @@ -966,7 +966,7 @@ vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vor_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vor_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m1_m( @@ -975,7 +975,7 @@ vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vor_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m2_m( @@ -984,7 +984,7 @@ vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vor_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vor_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m2_m( @@ -993,7 +993,7 @@ vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vor_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m4_m( @@ -1002,7 +1002,7 @@ vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vor_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vor_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m4_m( @@ -1011,7 +1011,7 @@ vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vor_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m8_m( @@ -1020,7 +1020,7 @@ vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vor_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vor_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m8_m( @@ -1029,7 +1029,7 @@ vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vor_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m( @@ -1038,7 +1038,7 @@ vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vor_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vor_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vor_vx_i32mf2_m(mask, op1, 
op2, vl); + return __riscv_vor_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m1_m( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vor_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vor_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m1_m( @@ -1065,7 +1065,7 @@ vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vor_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m2_m( @@ -1074,7 +1074,7 @@ vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vor_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vor_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m2_m( @@ -1083,7 +1083,7 @@ vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vor_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m4_m( @@ -1092,7 +1092,7 @@ vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vor_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vor_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m4_m( @@ -1101,7 +1101,7 @@ 
vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vor_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m8_m( @@ -1110,7 +1110,7 @@ vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vor_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vor_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m8_m( @@ -1119,7 +1119,7 @@ vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vor_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m1_m( @@ -1128,7 +1128,7 @@ vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vor_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vor_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m1_m( @@ -1137,7 +1137,7 @@ vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vor_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m2_m( @@ -1146,7 +1146,7 @@ vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vor_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vor_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m2_m( @@ -1155,7 +1155,7 @@ vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vor_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m4_m( @@ -1164,7 +1164,7 @@ vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vor_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vor_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m4_m( @@ -1173,7 +1173,7 @@ vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vor_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m8_m( @@ -1182,7 +1182,7 @@ vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vor_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vor_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m8_m( @@ -1191,7 +1191,7 @@ vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m8_m(mask, op1, op2, vl); + return 
__riscv_vor_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m( @@ -1200,7 +1200,7 @@ vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vor_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vor_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vor_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vor_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vor_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vor_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vor_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vor_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m( @@ -1245,7 +1245,7 @@ vuint8mf2_t 
test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vor_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m1_m( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vor_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vor_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m1_m( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vor_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m2_m( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vor_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vor_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m2_m( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vor_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m4_m( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, 
vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vor_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vor_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m4_m( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vor_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m8_m( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vor_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vor_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m8_m( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vor_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vor_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vor_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vor_vx_u16mf4_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vor_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vor_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vor_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m1_m( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vor_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vor_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m1_m( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vor_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m2_m( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vor_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vor_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m2_m( @@ -1389,7 +1389,7 @@ vuint16m2_t 
test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vor_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m4_m( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vor_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vor_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m4_m( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vor_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m8_m( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vor_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vor_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m8_m( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vor_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vor_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vor_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vor_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m1_m( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vor_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vor_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m1_m( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vor_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m2_m( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vor_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vor_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m2_m( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m2_m(mask, 
op1, op2, vl); + return __riscv_vor_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m4_m( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vor_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vor_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m4_m( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vor_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m8_m( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vor_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vor_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m8_m( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vor_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m1_m( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vor_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vor_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m1_m( @@ -1533,7 
+1533,7 @@ vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vor_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m2_m( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vor_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vor_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m2_m( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vor_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m4_m( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vor_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vor_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m4_m( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vor_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m8_m( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, s // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vor_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vor_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m8_m( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vor_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c index 82b03e8c3f0a..c2d942f05b5e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf8_i8m1(vector, scalar, vl); + return __riscv_vredand_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1( @@ -21,7 +21,7 @@ vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf4_i8m1(vector, scalar, vl); + return __riscv_vredand_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1( @@ -30,7 +30,7 @@ vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf2_i8m1(vector, scalar, vl); + return 
__riscv_vredand_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1( @@ -39,7 +39,7 @@ vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m1_i8m1(vector, scalar, vl); + return __riscv_vredand_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1( @@ -48,7 +48,7 @@ vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m2_i8m1(vector, scalar, vl); + return __riscv_vredand_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1( @@ -57,7 +57,7 @@ vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m4_i8m1(vector, scalar, vl); + return __riscv_vredand_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1( @@ -66,7 +66,7 @@ vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m8_i8m1(vector, scalar, vl); + return __riscv_vredand_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1( @@ -75,7 +75,7 @@ vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16mf4_i16m1(vector, scalar, vl); + return __riscv_vredand_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredand_vs_i16mf2_i16m1( @@ -84,7 +84,7 @@ vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16mf2_i16m1(vector, scalar, vl); + return __riscv_vredand_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1( @@ -93,7 +93,7 @@ vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m1_i16m1(vector, scalar, vl); + return __riscv_vredand_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1( @@ -102,7 +102,7 @@ vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m2_i16m1(vector, scalar, vl); + return __riscv_vredand_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1( @@ -111,7 +111,7 @@ vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m4_i16m1(vector, scalar, vl); + return __riscv_vredand_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1( @@ -120,7 +120,7 @@ vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m8_i16m1(vector, scalar, vl); + return __riscv_vredand_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredand_vs_i32mf2_i32m1( @@ -129,7 +129,7 @@ vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32mf2_i32m1(vector, scalar, vl); + return __riscv_vredand_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1( @@ -138,7 +138,7 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m1_i32m1(vector, scalar, vl); + return __riscv_vredand_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1( @@ -147,7 +147,7 @@ vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m2_i32m1(vector, scalar, vl); + return __riscv_vredand_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1( @@ -156,7 +156,7 @@ vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m4_i32m1(vector, scalar, vl); + return __riscv_vredand_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1( @@ -165,7 +165,7 @@ vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m8_i32m1(vector, scalar, vl); + return __riscv_vredand_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredand_vs_i64m1_i64m1( @@ -174,7 +174,7 @@ vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m1_i64m1(vector, scalar, vl); + return __riscv_vredand_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1( @@ -183,7 +183,7 @@ vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m2_i64m1(vector, scalar, vl); + return __riscv_vredand_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1( @@ -192,7 +192,7 @@ vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m4_i64m1(vector, scalar, vl); + return __riscv_vredand_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1( @@ -201,7 +201,7 @@ vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m8_i64m1(vector, scalar, vl); + return __riscv_vredand_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1( @@ -210,7 +210,7 @@ vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf8_u8m1(vector, scalar, vl); + return __riscv_vredand_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1( @@ 
-219,7 +219,7 @@ vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf4_u8m1(vector, scalar, vl); + return __riscv_vredand_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1( @@ -228,7 +228,7 @@ vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf2_u8m1(vector, scalar, vl); + return __riscv_vredand_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1( @@ -237,7 +237,7 @@ vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m1_u8m1(vector, scalar, vl); + return __riscv_vredand_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1( @@ -246,7 +246,7 @@ vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m2_u8m1(vector, scalar, vl); + return __riscv_vredand_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1( @@ -255,7 +255,7 @@ vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m4_u8m1(vector, scalar, vl); + return __riscv_vredand_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1( @@ -264,7 +264,7 @@ vuint8m1_t 
test_vredand_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m8_u8m1(vector, scalar, vl); + return __riscv_vredand_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1( @@ -273,7 +273,7 @@ vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf4_u16m1(vector, scalar, vl); + return __riscv_vredand_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1( @@ -282,7 +282,7 @@ vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf2_u16m1(vector, scalar, vl); + return __riscv_vredand_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1( @@ -291,7 +291,7 @@ vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m1_u16m1(vector, scalar, vl); + return __riscv_vredand_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1( @@ -300,7 +300,7 @@ vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m2_u16m1(vector, scalar, vl); + return __riscv_vredand_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1( @@ -309,7 +309,7 @@ vuint16m1_t 
test_vredand_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m4_u16m1(vector, scalar, vl); + return __riscv_vredand_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1( @@ -318,7 +318,7 @@ vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m8_u16m1(vector, scalar, vl); + return __riscv_vredand_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1( @@ -327,7 +327,7 @@ vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32mf2_u32m1(vector, scalar, vl); + return __riscv_vredand_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1( @@ -336,7 +336,7 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m1_u32m1(vector, scalar, vl); + return __riscv_vredand_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1( @@ -345,7 +345,7 @@ vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m2_u32m1(vector, scalar, vl); + return __riscv_vredand_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1( @@ -354,7 +354,7 @@ vuint32m1_t 
test_vredand_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m4_u32m1(vector, scalar, vl); + return __riscv_vredand_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1( @@ -363,7 +363,7 @@ vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m8_u32m1(vector, scalar, vl); + return __riscv_vredand_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1( @@ -372,7 +372,7 @@ vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m1_u64m1(vector, scalar, vl); + return __riscv_vredand_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1( @@ -381,7 +381,7 @@ vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m2_u64m1(vector, scalar, vl); + return __riscv_vredand_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1( @@ -390,7 +390,7 @@ vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m4_u64m1(vector, scalar, vl); + return __riscv_vredand_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1( @@ -399,7 +399,7 @@ vuint64m1_t 
test_vredand_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m8_u64m1(vector, scalar, vl); + return __riscv_vredand_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m( @@ -408,7 +408,7 @@ vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m( @@ -417,7 +417,7 @@ vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m( @@ -426,7 +426,7 @@ vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m( @@ -435,7 +435,7 @@ vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m1_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i8m1_i8m1_m(mask, vector, scalar, vl); 
} // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m( @@ -444,7 +444,7 @@ vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m2_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i8m2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m( @@ -453,7 +453,7 @@ vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m4_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i8m4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m( @@ -462,7 +462,7 @@ vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m8_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i8m8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m( @@ -480,7 +480,7 @@ vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - 
return vredand_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m( @@ -489,7 +489,7 @@ vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m1_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i16m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m( @@ -498,7 +498,7 @@ vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m2_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i16m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m( @@ -507,7 +507,7 @@ vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i16m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m( @@ -516,7 +516,7 @@ vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m8_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i16m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m( @@ -525,7 +525,7 @@ vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, 
vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m( @@ -534,7 +534,7 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m1_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i32m1_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m( @@ -543,7 +543,7 @@ vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i32m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m( @@ -552,7 +552,7 @@ vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m4_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i32m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m( @@ -561,7 +561,7 @@ vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m8_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i32m8_i32m1_m(mask, vector, scalar, 
vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m( @@ -570,7 +570,7 @@ vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m1_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i64m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m( @@ -579,7 +579,7 @@ vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m2_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i64m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m( @@ -588,7 +588,7 @@ vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m4_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i64m4_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m( @@ -597,7 +597,7 @@ vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m8_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_i64m8_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m( @@ -606,7 +606,7 @@ vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, 
vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m( @@ -615,7 +615,7 @@ vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m( @@ -624,7 +624,7 @@ vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m( @@ -633,7 +633,7 @@ vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m1_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u8m1_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m( @@ -642,7 +642,7 @@ vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m2_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u8m2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m( @@ -651,7 +651,7 @@ vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t 
vector, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m4_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u8m4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m( @@ -660,7 +660,7 @@ vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m8_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u8m8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m( @@ -669,7 +669,7 @@ vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m( @@ -678,7 +678,7 @@ vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m( @@ -687,7 +687,7 @@ vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m1_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u16m1_u16m1_m(mask, vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m( @@ -696,7 +696,7 @@ vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u16m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m( @@ -705,7 +705,7 @@ vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m4_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u16m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m( @@ -714,7 +714,7 @@ vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m8_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u16m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m( @@ -723,7 +723,7 @@ vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m( @@ -732,7 +732,7 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, 
vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m1_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u32m1_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m( @@ -741,7 +741,7 @@ vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u32m2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m( @@ -750,7 +750,7 @@ vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m4_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u32m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m( @@ -759,7 +759,7 @@ vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m8_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u32m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m( @@ -768,7 +768,7 @@ vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m1_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u64m1_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m( @@ -777,7 +777,7 @@ vuint64m1_t 
test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m2_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u64m2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m( @@ -786,7 +786,7 @@ vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m4_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u64m4_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m( @@ -795,6 +795,6 @@ vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m8_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredand_vs_u64m8_u64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c index 539fa5909fab..a6e3217db994 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf8_i8m1(vector, scalar, vl); + return __riscv_vredmax_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1( @@ -21,7 +21,7 @@ vint8m1_t 
test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf4_i8m1(vector, scalar, vl); + return __riscv_vredmax_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1( @@ -30,7 +30,7 @@ vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf2_i8m1(vector, scalar, vl); + return __riscv_vredmax_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1( @@ -39,7 +39,7 @@ vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m1_i8m1(vector, scalar, vl); + return __riscv_vredmax_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1( @@ -48,7 +48,7 @@ vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m2_i8m1(vector, scalar, vl); + return __riscv_vredmax_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1( @@ -57,7 +57,7 @@ vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m4_i8m1(vector, scalar, vl); + return __riscv_vredmax_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1( @@ -66,7 +66,7 @@ vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t v // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m8_i8m1(vector, scalar, vl); + return __riscv_vredmax_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1( @@ -75,7 +75,7 @@ vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf4_i16m1(vector, scalar, vl); + return __riscv_vredmax_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1( @@ -84,7 +84,7 @@ vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf2_i16m1(vector, scalar, vl); + return __riscv_vredmax_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1( @@ -93,7 +93,7 @@ vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m1_i16m1(vector, scalar, vl); + return __riscv_vredmax_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1( @@ -102,7 +102,7 @@ vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m2_i16m1(vector, scalar, vl); + return __riscv_vredmax_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1( @@ -111,7 +111,7 @@ vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m4_i16m1(vector, scalar, vl); + return __riscv_vredmax_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1( @@ -120,7 +120,7 @@ vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m8_i16m1(vector, scalar, vl); + return __riscv_vredmax_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1( @@ -129,7 +129,7 @@ vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32mf2_i32m1(vector, scalar, vl); + return __riscv_vredmax_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1( @@ -138,7 +138,7 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m1_i32m1(vector, scalar, vl); + return __riscv_vredmax_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1( @@ -147,7 +147,7 @@ vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m2_i32m1(vector, scalar, vl); + return __riscv_vredmax_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1( @@ -156,7 +156,7 @@ vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m4_i32m1(vector, scalar, vl); + return __riscv_vredmax_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1( @@ -165,7 +165,7 @@ vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m8_i32m1(vector, scalar, vl); + return __riscv_vredmax_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1( @@ -174,7 +174,7 @@ vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m1_i64m1(vector, scalar, vl); + return __riscv_vredmax_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1( @@ -183,7 +183,7 @@ vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m2_i64m1(vector, scalar, vl); + return __riscv_vredmax_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1( @@ -192,7 +192,7 @@ vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m4_i64m1(vector, scalar, vl); + return __riscv_vredmax_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1( @@ -201,7 +201,7 @@ vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m8_i64m1(vector, scalar, vl); + return __riscv_vredmax_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m( @@ -210,7 +210,7 @@ vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m( @@ -219,7 +219,7 @@ vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m( @@ -228,7 +228,7 @@ vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m( @@ -237,7 +237,7 @@ vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m1_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i8m1_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m( @@ -246,7 +246,7 @@ vint8m1_t 
test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m2_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i8m2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m( @@ -255,7 +255,7 @@ vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m4_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i8m4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m( @@ -264,7 +264,7 @@ vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m8_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i8m8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m( @@ -273,7 +273,7 @@ vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m( @@ -282,7 +282,7 @@ vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); + return 
__riscv_vredmax_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m( @@ -291,7 +291,7 @@ vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m1_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i16m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m( @@ -300,7 +300,7 @@ vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m2_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i16m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m( @@ -309,7 +309,7 @@ vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i16m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m( @@ -318,7 +318,7 @@ vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m8_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i16m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m( @@ -327,7 +327,7 @@ vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m( @@ -336,7 +336,7 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m1_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i32m1_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m( @@ -345,7 +345,7 @@ vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i32m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m( @@ -354,7 +354,7 @@ vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m4_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i32m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m( @@ -363,7 +363,7 @@ vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m8_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i32m8_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredmax_vs_i64m1_i64m1_m( @@ -372,7 +372,7 @@ vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m1_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i64m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m( @@ -381,7 +381,7 @@ vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m2_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i64m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m( @@ -390,7 +390,7 @@ vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m4_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i64m4_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m( @@ -399,6 +399,6 @@ vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m8_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredmax_vs_i64m8_i64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c index 605c36b55ead..7c019b36adc9 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf8_u8m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1( @@ -21,7 +21,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf4_u8m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1( @@ -30,7 +30,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf2_u8m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1( @@ -39,7 +39,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m1_u8m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1( @@ -48,7 +48,7 @@ vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m2_u8m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m2_u8m1(vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1( @@ -57,7 +57,7 @@ vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m4_u8m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1( @@ -66,7 +66,7 @@ vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m8_u8m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf4_u16m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1( @@ -84,7 +84,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scala // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf2_u16m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1( @@ -93,7 +93,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scala // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m1_u16m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m1_u16m1(vector, scalar, vl); } 
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1( @@ -102,7 +102,7 @@ vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m2_u16m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1( @@ -111,7 +111,7 @@ vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m4_u16m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1( @@ -120,7 +120,7 @@ vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m8_u16m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1( @@ -129,7 +129,7 @@ vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32mf2_u32m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1( @@ -138,7 +138,7 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scala // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m1_u32m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m1_u32m1(vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1( @@ -147,7 +147,7 @@ vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m2_u32m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1( @@ -156,7 +156,7 @@ vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m4_u32m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1( @@ -165,7 +165,7 @@ vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m8_u32m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1( @@ -174,7 +174,7 @@ vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m1_u64m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1( @@ -183,7 +183,7 @@ vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m2_u64m1(vector, scalar, vl); + return 
__riscv_vredmaxu_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1( @@ -192,7 +192,7 @@ vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m4_u64m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1( @@ -201,7 +201,7 @@ vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m8_u64m1(vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m( @@ -210,7 +210,7 @@ vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m( @@ -219,7 +219,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m( @@ -228,7 +228,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t 
scalar, size_t vl) { - return vredmaxu_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m( @@ -237,7 +237,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m1_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m1_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m( @@ -246,7 +246,7 @@ vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m2_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m( @@ -255,7 +255,7 @@ vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m4_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m( @@ -264,7 +264,7 @@ vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m8_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m( @@ -273,7 +273,7 @@ vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t 
vector, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m( @@ -282,7 +282,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m( @@ -291,7 +291,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m1_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m1_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m( @@ -300,7 +300,7 @@ vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m( @@ -309,7 +309,7 @@ vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m4_u16m1_m(mask, vector, scalar, vl); + return 
__riscv_vredmaxu_vs_u16m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m( @@ -318,7 +318,7 @@ vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m8_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m( @@ -327,7 +327,7 @@ vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m( @@ -336,7 +336,7 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m1_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m1_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m( @@ -345,7 +345,7 @@ vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m( @@ -354,7 +354,7 @@ vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, v // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m4_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m( @@ -363,7 +363,7 @@ vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m8_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m( @@ -372,7 +372,7 @@ vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m1_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m1_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m( @@ -381,7 +381,7 @@ vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m2_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m( @@ -390,7 +390,7 @@ vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m4_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m4_u64m1_m(mask, vector, scalar, 
vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m( @@ -399,6 +399,6 @@ vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m8_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m8_u64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c index 78634f4966f0..c89e38848525 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf8_i8m1(vector, scalar, vl); + return __riscv_vredmin_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1( @@ -21,7 +21,7 @@ vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf4_i8m1(vector, scalar, vl); + return __riscv_vredmin_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1( @@ -30,7 +30,7 @@ vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf2_i8m1(vector, scalar, vl); + return __riscv_vredmin_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1( @@ -39,7 +39,7 @@ vint8m1_t 
test_vredmin_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m1_i8m1(vector, scalar, vl); + return __riscv_vredmin_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1( @@ -48,7 +48,7 @@ vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m2_i8m1(vector, scalar, vl); + return __riscv_vredmin_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1( @@ -57,7 +57,7 @@ vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m4_i8m1(vector, scalar, vl); + return __riscv_vredmin_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1( @@ -66,7 +66,7 @@ vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m8_i8m1(vector, scalar, vl); + return __riscv_vredmin_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1( @@ -75,7 +75,7 @@ vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf4_i16m1(vector, scalar, vl); + return __riscv_vredmin_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1( @@ -84,7 +84,7 @@ vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, s // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf2_i16m1(vector, scalar, vl); + return __riscv_vredmin_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1( @@ -93,7 +93,7 @@ vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m1_i16m1(vector, scalar, vl); + return __riscv_vredmin_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1( @@ -102,7 +102,7 @@ vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m2_i16m1(vector, scalar, vl); + return __riscv_vredmin_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1( @@ -111,7 +111,7 @@ vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m4_i16m1(vector, scalar, vl); + return __riscv_vredmin_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1( @@ -120,7 +120,7 @@ vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m8_i16m1(vector, scalar, vl); + return __riscv_vredmin_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1( @@ -129,7 +129,7 @@ vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32mf2_i32m1(vector, scalar, vl); + return __riscv_vredmin_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1( @@ -138,7 +138,7 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m1_i32m1(vector, scalar, vl); + return __riscv_vredmin_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1( @@ -147,7 +147,7 @@ vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m2_i32m1(vector, scalar, vl); + return __riscv_vredmin_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1( @@ -156,7 +156,7 @@ vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m4_i32m1(vector, scalar, vl); + return __riscv_vredmin_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1( @@ -165,7 +165,7 @@ vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m8_i32m1(vector, scalar, vl); + return __riscv_vredmin_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1( @@ -174,7 +174,7 @@ vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vredmin_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m1_i64m1(vector, scalar, vl); + return __riscv_vredmin_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1( @@ -183,7 +183,7 @@ vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m2_i64m1(vector, scalar, vl); + return __riscv_vredmin_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1( @@ -192,7 +192,7 @@ vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m4_i64m1(vector, scalar, vl); + return __riscv_vredmin_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1( @@ -201,7 +201,7 @@ vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m8_i64m1(vector, scalar, vl); + return __riscv_vredmin_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m( @@ -210,7 +210,7 @@ vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m( @@ -219,7 +219,7 @@ vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m( @@ -228,7 +228,7 @@ vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m( @@ -237,7 +237,7 @@ vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m1_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i8m1_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m( @@ -246,7 +246,7 @@ vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m2_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i8m2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m( @@ -255,7 +255,7 @@ vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m4_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i8m4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m( @@ -264,7 +264,7 @@ vint8m1_t 
test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m8_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i8m8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m( @@ -273,7 +273,7 @@ vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m( @@ -282,7 +282,7 @@ vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m( @@ -291,7 +291,7 @@ vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m1_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i16m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m( @@ -300,7 +300,7 @@ vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m2_i16m1_m(mask, vector, scalar, vl); + 
return __riscv_vredmin_vs_i16m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m( @@ -309,7 +309,7 @@ vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i16m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m( @@ -318,7 +318,7 @@ vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m8_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i16m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m( @@ -327,7 +327,7 @@ vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m( @@ -336,7 +336,7 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m1_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i32m1_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m( @@ -345,7 +345,7 @@ vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i32m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m( @@ -354,7 +354,7 @@ vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m4_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i32m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m( @@ -363,7 +363,7 @@ vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m8_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i32m8_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m( @@ -372,7 +372,7 @@ vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m1_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i64m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m( @@ -381,7 +381,7 @@ vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m2_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i64m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m( 
@@ -390,7 +390,7 @@ vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m4_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i64m4_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m( @@ -399,6 +399,6 @@ vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m8_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredmin_vs_i64m8_i64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c index 8518f88850eb..cf6a29425b26 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf8_u8m1(vector, scalar, vl); + return __riscv_vredminu_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1( @@ -21,7 +21,7 @@ vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf4_u8m1(vector, scalar, vl); + return __riscv_vredminu_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1( @@ -30,7 +30,7 @@ vuint8m1_t 
test_vredminu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf2_u8m1(vector, scalar, vl); + return __riscv_vredminu_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1( @@ -39,7 +39,7 @@ vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m1_u8m1(vector, scalar, vl); + return __riscv_vredminu_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1( @@ -48,7 +48,7 @@ vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m2_u8m1(vector, scalar, vl); + return __riscv_vredminu_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1( @@ -57,7 +57,7 @@ vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m4_u8m1(vector, scalar, vl); + return __riscv_vredminu_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1( @@ -66,7 +66,7 @@ vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m8_u8m1(vector, scalar, vl); + return __riscv_vredminu_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vector, 
vuint8m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf4_u16m1(vector, scalar, vl); + return __riscv_vredminu_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1( @@ -84,7 +84,7 @@ vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scala // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf2_u16m1(vector, scalar, vl); + return __riscv_vredminu_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1( @@ -93,7 +93,7 @@ vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scala // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m1_u16m1(vector, scalar, vl); + return __riscv_vredminu_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1( @@ -102,7 +102,7 @@ vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m2_u16m1(vector, scalar, vl); + return __riscv_vredminu_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1( @@ -111,7 +111,7 @@ vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m4_u16m1(vector, scalar, vl); + return __riscv_vredminu_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1( @@ -120,7 +120,7 @@ vuint16m1_t 
test_vredminu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m8_u16m1(vector, scalar, vl); + return __riscv_vredminu_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1( @@ -129,7 +129,7 @@ vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32mf2_u32m1(vector, scalar, vl); + return __riscv_vredminu_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1( @@ -138,7 +138,7 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scala // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m1_u32m1(vector, scalar, vl); + return __riscv_vredminu_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1( @@ -147,7 +147,7 @@ vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m2_u32m1(vector, scalar, vl); + return __riscv_vredminu_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1( @@ -156,7 +156,7 @@ vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m4_u32m1(vector, scalar, vl); + return __riscv_vredminu_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1( @@ -165,7 +165,7 @@ 
vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m8_u32m1(vector, scalar, vl); + return __riscv_vredminu_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1( @@ -174,7 +174,7 @@ vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m1_u64m1(vector, scalar, vl); + return __riscv_vredminu_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1( @@ -183,7 +183,7 @@ vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m2_u64m1(vector, scalar, vl); + return __riscv_vredminu_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1( @@ -192,7 +192,7 @@ vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m4_u64m1(vector, scalar, vl); + return __riscv_vredminu_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1( @@ -201,7 +201,7 @@ vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m8_u64m1(vector, scalar, vl); + return __riscv_vredminu_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m( @@ -210,7 
+210,7 @@ vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m( @@ -219,7 +219,7 @@ vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m( @@ -228,7 +228,7 @@ vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m( @@ -237,7 +237,7 @@ vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m1_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u8m1_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m( @@ -246,7 +246,7 @@ vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m2_u8m1_m(mask, vector, 
scalar, vl); + return __riscv_vredminu_vs_u8m2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m( @@ -255,7 +255,7 @@ vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m4_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u8m4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m( @@ -264,7 +264,7 @@ vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m8_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u8m8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m( @@ -273,7 +273,7 @@ vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m( @@ -282,7 +282,7 @@ vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m( @@ -291,7 +291,7 @@ vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m1_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u16m1_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m( @@ -300,7 +300,7 @@ vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u16m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m( @@ -309,7 +309,7 @@ vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m4_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u16m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m( @@ -318,7 +318,7 @@ vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m8_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u16m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m( @@ -327,7 +327,7 @@ vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u32mf2_u32m1_m(mask, vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m( @@ -336,7 +336,7 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m1_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u32m1_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m( @@ -345,7 +345,7 @@ vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u32m2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m( @@ -354,7 +354,7 @@ vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m4_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u32m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m( @@ -363,7 +363,7 @@ vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m8_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u32m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m( @@ -372,7 +372,7 @@ vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m1_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u64m1_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m( @@ -381,7 +381,7 @@ vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m2_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u64m2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m( @@ -390,7 +390,7 @@ vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m4_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u64m4_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m( @@ -399,6 +399,6 @@ vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m8_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredminu_vs_u64m8_u64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c index c12ffdc50d55..5b2955aaf056 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c @@ -12,7 +12,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf8_i8m1(vector, scalar, vl); + return __riscv_vredor_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1( @@ -21,7 +21,7 @@ vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf4_i8m1(vector, scalar, vl); + return __riscv_vredor_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1( @@ -30,7 +30,7 @@ vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf2_i8m1(vector, scalar, vl); + return __riscv_vredor_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1( @@ -39,7 +39,7 @@ vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m1_i8m1(vector, scalar, vl); + return __riscv_vredor_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1( @@ -48,7 +48,7 @@ vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m2_i8m1(vector, scalar, vl); + return __riscv_vredor_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1( @@ -57,7 +57,7 @@ vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, 
size_t vl) { - return vredor_vs_i8m4_i8m1(vector, scalar, vl); + return __riscv_vredor_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1( @@ -66,7 +66,7 @@ vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m8_i8m1(vector, scalar, vl); + return __riscv_vredor_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1( @@ -75,7 +75,7 @@ vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf4_i16m1(vector, scalar, vl); + return __riscv_vredor_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1( @@ -84,7 +84,7 @@ vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf2_i16m1(vector, scalar, vl); + return __riscv_vredor_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1( @@ -93,7 +93,7 @@ vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m1_i16m1(vector, scalar, vl); + return __riscv_vredor_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1( @@ -102,7 +102,7 @@ vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m2_i16m1(vector, scalar, 
vl); + return __riscv_vredor_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1( @@ -111,7 +111,7 @@ vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m4_i16m1(vector, scalar, vl); + return __riscv_vredor_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1( @@ -120,7 +120,7 @@ vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m8_i16m1(vector, scalar, vl); + return __riscv_vredor_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1( @@ -129,7 +129,7 @@ vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32mf2_i32m1(vector, scalar, vl); + return __riscv_vredor_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1( @@ -138,7 +138,7 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m1_i32m1(vector, scalar, vl); + return __riscv_vredor_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1( @@ -147,7 +147,7 @@ vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m2_i32m1(vector, scalar, vl); + return 
__riscv_vredor_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1( @@ -156,7 +156,7 @@ vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m4_i32m1(vector, scalar, vl); + return __riscv_vredor_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1( @@ -165,7 +165,7 @@ vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m8_i32m1(vector, scalar, vl); + return __riscv_vredor_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1( @@ -174,7 +174,7 @@ vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m1_i64m1(vector, scalar, vl); + return __riscv_vredor_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1( @@ -183,7 +183,7 @@ vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m2_i64m1(vector, scalar, vl); + return __riscv_vredor_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1( @@ -192,7 +192,7 @@ vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m4_i64m1(vector, scalar, vl); + return __riscv_vredor_vs_i64m4_i64m1(vector, scalar, 
vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1( @@ -201,7 +201,7 @@ vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m8_i64m1(vector, scalar, vl); + return __riscv_vredor_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1( @@ -210,7 +210,7 @@ vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf8_u8m1(vector, scalar, vl); + return __riscv_vredor_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1( @@ -219,7 +219,7 @@ vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf4_u8m1(vector, scalar, vl); + return __riscv_vredor_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1( @@ -228,7 +228,7 @@ vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf2_u8m1(vector, scalar, vl); + return __riscv_vredor_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1( @@ -237,7 +237,7 @@ vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m1_u8m1(vector, scalar, vl); + return __riscv_vredor_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1( @@ -246,7 
+246,7 @@ vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m2_u8m1(vector, scalar, vl); + return __riscv_vredor_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1( @@ -255,7 +255,7 @@ vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m4_u8m1(vector, scalar, vl); + return __riscv_vredor_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1( @@ -264,7 +264,7 @@ vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m8_u8m1(vector, scalar, vl); + return __riscv_vredor_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1( @@ -273,7 +273,7 @@ vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf4_u16m1(vector, scalar, vl); + return __riscv_vredor_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1( @@ -282,7 +282,7 @@ vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf2_u16m1(vector, scalar, vl); + return __riscv_vredor_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1( @@ -291,7 +291,7 @@ vuint16m1_t 
test_vredor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m1_u16m1(vector, scalar, vl); + return __riscv_vredor_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1( @@ -300,7 +300,7 @@ vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m2_u16m1(vector, scalar, vl); + return __riscv_vredor_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1( @@ -309,7 +309,7 @@ vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m4_u16m1(vector, scalar, vl); + return __riscv_vredor_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1( @@ -318,7 +318,7 @@ vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m8_u16m1(vector, scalar, vl); + return __riscv_vredor_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1( @@ -327,7 +327,7 @@ vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32mf2_u32m1(vector, scalar, vl); + return __riscv_vredor_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1( @@ -336,7 +336,7 @@ vuint32m1_t 
test_vredor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m1_u32m1(vector, scalar, vl); + return __riscv_vredor_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1( @@ -345,7 +345,7 @@ vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m2_u32m1(vector, scalar, vl); + return __riscv_vredor_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1( @@ -354,7 +354,7 @@ vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m4_u32m1(vector, scalar, vl); + return __riscv_vredor_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1( @@ -363,7 +363,7 @@ vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m8_u32m1(vector, scalar, vl); + return __riscv_vredor_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1( @@ -372,7 +372,7 @@ vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m1_u64m1(vector, scalar, vl); + return __riscv_vredor_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1( @@ -381,7 +381,7 @@ vuint64m1_t 
test_vredor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m2_u64m1(vector, scalar, vl); + return __riscv_vredor_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1( @@ -390,7 +390,7 @@ vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m4_u64m1(vector, scalar, vl); + return __riscv_vredor_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1( @@ -399,7 +399,7 @@ vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m8_u64m1(vector, scalar, vl); + return __riscv_vredor_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m( @@ -408,7 +408,7 @@ vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m( @@ -417,7 +417,7 @@ vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m( @@ -426,7 
+426,7 @@ vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m( @@ -435,7 +435,7 @@ vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m1_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i8m1_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m( @@ -444,7 +444,7 @@ vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m2_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i8m2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m( @@ -453,7 +453,7 @@ vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m4_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i8m4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m( @@ -462,7 +462,7 @@ vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m8_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i8m8_i8m1_m(mask, vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m( @@ -480,7 +480,7 @@ vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m( @@ -489,7 +489,7 @@ vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m1_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i16m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m( @@ -498,7 +498,7 @@ vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m2_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i16m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m( @@ -507,7 +507,7 @@ vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, 
vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i16m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m( @@ -516,7 +516,7 @@ vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m8_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i16m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m( @@ -525,7 +525,7 @@ vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m( @@ -534,7 +534,7 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m1_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i32m1_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m( @@ -543,7 +543,7 @@ vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i32m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m( @@ -552,7 +552,7 @@ vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, 
vint32m2_t vector, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m4_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i32m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m( @@ -561,7 +561,7 @@ vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m8_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i32m8_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m( @@ -570,7 +570,7 @@ vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m1_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i64m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m( @@ -579,7 +579,7 @@ vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m2_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i64m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m( @@ -588,7 +588,7 @@ vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m4_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i64m4_i64m1_m(mask, vector, scalar, vl); } 
// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m( @@ -597,7 +597,7 @@ vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m8_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_i64m8_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m( @@ -606,7 +606,7 @@ vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m( @@ -615,7 +615,7 @@ vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m( @@ -624,7 +624,7 @@ vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m( @@ -633,7 +633,7 @@ vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return 
vredor_vs_u8m1_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u8m1_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m( @@ -642,7 +642,7 @@ vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m2_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u8m2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m( @@ -651,7 +651,7 @@ vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m4_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u8m4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m( @@ -660,7 +660,7 @@ vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m8_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u8m8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m( @@ -669,7 +669,7 @@ vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m( @@ -678,7 +678,7 @@ vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m( @@ -687,7 +687,7 @@ vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m1_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u16m1_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m( @@ -696,7 +696,7 @@ vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u16m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m( @@ -705,7 +705,7 @@ vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m4_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u16m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m( @@ -714,7 +714,7 @@ vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m8_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u16m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredor_vs_u32mf2_u32m1_m( @@ -723,7 +723,7 @@ vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m( @@ -732,7 +732,7 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m1_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u32m1_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m( @@ -741,7 +741,7 @@ vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u32m2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m( @@ -750,7 +750,7 @@ vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m4_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u32m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m( @@ -759,7 +759,7 @@ vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - 
return vredor_vs_u32m8_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u32m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m( @@ -768,7 +768,7 @@ vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m1_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u64m1_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m( @@ -777,7 +777,7 @@ vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m2_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u64m2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m( @@ -786,7 +786,7 @@ vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m4_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u64m4_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m( @@ -795,6 +795,6 @@ vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m8_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredor_vs_u64m8_u64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c index 195d84f5facc..14ee95394a59 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf8_i8m1(vector, scalar, vl); + return __riscv_vredsum_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1( @@ -21,7 +21,7 @@ vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf4_i8m1(vector, scalar, vl); + return __riscv_vredsum_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1( @@ -30,7 +30,7 @@ vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf2_i8m1(vector, scalar, vl); + return __riscv_vredsum_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1( @@ -39,7 +39,7 @@ vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m1_i8m1(vector, scalar, vl); + return __riscv_vredsum_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1( @@ -48,7 +48,7 @@ vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) 
{ - return vredsum_vs_i8m2_i8m1(vector, scalar, vl); + return __riscv_vredsum_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1( @@ -57,7 +57,7 @@ vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m4_i8m1(vector, scalar, vl); + return __riscv_vredsum_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1( @@ -66,7 +66,7 @@ vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m8_i8m1(vector, scalar, vl); + return __riscv_vredsum_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1( @@ -75,7 +75,7 @@ vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf4_i16m1(vector, scalar, vl); + return __riscv_vredsum_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1( @@ -84,7 +84,7 @@ vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf2_i16m1(vector, scalar, vl); + return __riscv_vredsum_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1( @@ -93,7 +93,7 @@ vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m1_i16m1(vector, scalar, vl); 
+ return __riscv_vredsum_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1( @@ -102,7 +102,7 @@ vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m2_i16m1(vector, scalar, vl); + return __riscv_vredsum_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1( @@ -111,7 +111,7 @@ vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m4_i16m1(vector, scalar, vl); + return __riscv_vredsum_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1( @@ -120,7 +120,7 @@ vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m8_i16m1(vector, scalar, vl); + return __riscv_vredsum_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1( @@ -129,7 +129,7 @@ vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32mf2_i32m1(vector, scalar, vl); + return __riscv_vredsum_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1( @@ -138,7 +138,7 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m1_i32m1(vector, scalar, vl); + return 
__riscv_vredsum_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1( @@ -147,7 +147,7 @@ vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m2_i32m1(vector, scalar, vl); + return __riscv_vredsum_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1( @@ -156,7 +156,7 @@ vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m4_i32m1(vector, scalar, vl); + return __riscv_vredsum_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1( @@ -165,7 +165,7 @@ vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m8_i32m1(vector, scalar, vl); + return __riscv_vredsum_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1( @@ -174,7 +174,7 @@ vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m1_i64m1(vector, scalar, vl); + return __riscv_vredsum_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1( @@ -183,7 +183,7 @@ vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m2_i64m1(vector, scalar, vl); + return 
__riscv_vredsum_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1( @@ -192,7 +192,7 @@ vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m4_i64m1(vector, scalar, vl); + return __riscv_vredsum_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1( @@ -201,7 +201,7 @@ vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m8_i64m1(vector, scalar, vl); + return __riscv_vredsum_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1( @@ -210,7 +210,7 @@ vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf8_u8m1(vector, scalar, vl); + return __riscv_vredsum_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1( @@ -219,7 +219,7 @@ vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf4_u8m1(vector, scalar, vl); + return __riscv_vredsum_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1( @@ -228,7 +228,7 @@ vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf2_u8m1(vector, scalar, vl); + return __riscv_vredsum_vs_u8mf2_u8m1(vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1( @@ -237,7 +237,7 @@ vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m1_u8m1(vector, scalar, vl); + return __riscv_vredsum_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1( @@ -246,7 +246,7 @@ vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m2_u8m1(vector, scalar, vl); + return __riscv_vredsum_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1( @@ -255,7 +255,7 @@ vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m4_u8m1(vector, scalar, vl); + return __riscv_vredsum_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1( @@ -264,7 +264,7 @@ vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m8_u8m1(vector, scalar, vl); + return __riscv_vredsum_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1( @@ -273,7 +273,7 @@ vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf4_u16m1(vector, scalar, vl); + return __riscv_vredsum_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredsum_vs_u16mf2_u16m1( @@ -282,7 +282,7 @@ vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf2_u16m1(vector, scalar, vl); + return __riscv_vredsum_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1( @@ -291,7 +291,7 @@ vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m1_u16m1(vector, scalar, vl); + return __riscv_vredsum_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1( @@ -300,7 +300,7 @@ vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m2_u16m1(vector, scalar, vl); + return __riscv_vredsum_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1( @@ -309,7 +309,7 @@ vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m4_u16m1(vector, scalar, vl); + return __riscv_vredsum_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1( @@ -318,7 +318,7 @@ vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m8_u16m1(vector, scalar, vl); + return __riscv_vredsum_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredsum_vs_u32mf2_u32m1( @@ -327,7 +327,7 @@ vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32mf2_u32m1(vector, scalar, vl); + return __riscv_vredsum_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1( @@ -336,7 +336,7 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m1_u32m1(vector, scalar, vl); + return __riscv_vredsum_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1( @@ -345,7 +345,7 @@ vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m2_u32m1(vector, scalar, vl); + return __riscv_vredsum_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1( @@ -354,7 +354,7 @@ vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m4_u32m1(vector, scalar, vl); + return __riscv_vredsum_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1( @@ -363,7 +363,7 @@ vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m8_u32m1(vector, scalar, vl); + return __riscv_vredsum_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredsum_vs_u64m1_u64m1( @@ -372,7 +372,7 @@ vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m1_u64m1(vector, scalar, vl); + return __riscv_vredsum_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1( @@ -381,7 +381,7 @@ vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m2_u64m1(vector, scalar, vl); + return __riscv_vredsum_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1( @@ -390,7 +390,7 @@ vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m4_u64m1(vector, scalar, vl); + return __riscv_vredsum_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1( @@ -399,7 +399,7 @@ vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m8_u64m1(vector, scalar, vl); + return __riscv_vredsum_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m( @@ -408,7 +408,7 @@ vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( @@ -417,7 +417,7 @@ vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( @@ -426,7 +426,7 @@ vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m( @@ -435,7 +435,7 @@ vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m1_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i8m1_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m( @@ -444,7 +444,7 @@ vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m2_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i8m2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m( @@ -453,7 +453,7 @@ vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return 
vredsum_vs_i8m4_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i8m4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m( @@ -462,7 +462,7 @@ vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m8_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i8m8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( @@ -480,7 +480,7 @@ vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m( @@ -489,7 +489,7 @@ vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m1_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i16m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m( @@ -498,7 +498,7 @@ vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m2_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i16m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m( @@ -507,7 +507,7 @@ vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i16m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m( @@ -516,7 +516,7 @@ vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m8_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i16m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( @@ -525,7 +525,7 @@ vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m( @@ -534,7 +534,7 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m1_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i32m1_i32m1_m(mask, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m( @@ -543,7 +543,7 @@ vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i32m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m( @@ -552,7 +552,7 @@ vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m4_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i32m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m( @@ -561,7 +561,7 @@ vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m8_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i32m8_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m( @@ -570,7 +570,7 @@ vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m1_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i64m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m( @@ -579,7 +579,7 @@ vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, 
size_t vl) { - return vredsum_vs_i64m2_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i64m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m( @@ -588,7 +588,7 @@ vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m4_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i64m4_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m( @@ -597,7 +597,7 @@ vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m8_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_i64m8_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( @@ -606,7 +606,7 @@ vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( @@ -615,7 +615,7 @@ vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( @@ -624,7 +624,7 @@ vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t 
vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m( @@ -633,7 +633,7 @@ vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m1_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u8m1_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m( @@ -642,7 +642,7 @@ vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m2_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u8m2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m( @@ -651,7 +651,7 @@ vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m4_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u8m4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m( @@ -660,7 +660,7 @@ vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m8_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u8m8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredsum_vs_u16mf4_u16m1_m( @@ -669,7 +669,7 @@ vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( @@ -678,7 +678,7 @@ vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m( @@ -687,7 +687,7 @@ vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m1_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u16m1_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m( @@ -696,7 +696,7 @@ vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u16m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m( @@ -705,7 +705,7 @@ vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t 
scalar, size_t vl) { - return vredsum_vs_u16m4_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u16m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m( @@ -714,7 +714,7 @@ vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m8_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u16m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m( @@ -723,7 +723,7 @@ vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m( @@ -732,7 +732,7 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m1_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u32m1_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m( @@ -741,7 +741,7 @@ vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u32m2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m( @@ -750,7 +750,7 @@ vuint32m1_t 
test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m4_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u32m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m( @@ -759,7 +759,7 @@ vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m8_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u32m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m( @@ -768,7 +768,7 @@ vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m1_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u64m1_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m( @@ -777,7 +777,7 @@ vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m2_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u64m2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m( @@ -786,7 +786,7 @@ vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m4_u64m1_m(mask, vector, 
scalar, vl); + return __riscv_vredsum_vs_u64m4_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m( @@ -795,6 +795,6 @@ vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m8_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredsum_vs_u64m8_u64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c index 064e0ec4b5e2..fe5409fc274c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf8_i8m1(vector, scalar, vl); + return __riscv_vredxor_vs_i8mf8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1( @@ -21,7 +21,7 @@ vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf4_i8m1(vector, scalar, vl); + return __riscv_vredxor_vs_i8mf4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1( @@ -30,7 +30,7 @@ vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf2_i8m1(vector, scalar, vl); + return __riscv_vredxor_vs_i8mf2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredxor_vs_i8m1_i8m1( @@ -39,7 +39,7 @@ vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m1_i8m1(vector, scalar, vl); + return __riscv_vredxor_vs_i8m1_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1( @@ -48,7 +48,7 @@ vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m2_i8m1(vector, scalar, vl); + return __riscv_vredxor_vs_i8m2_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1( @@ -57,7 +57,7 @@ vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m4_i8m1(vector, scalar, vl); + return __riscv_vredxor_vs_i8m4_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1( @@ -66,7 +66,7 @@ vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m8_i8m1(vector, scalar, vl); + return __riscv_vredxor_vs_i8m8_i8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1( @@ -75,7 +75,7 @@ vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf4_i16m1(vector, scalar, vl); + return __riscv_vredxor_vs_i16mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1( @@ -84,7 +84,7 @@ vint16m1_t 
test_vredxor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf2_i16m1(vector, scalar, vl); + return __riscv_vredxor_vs_i16mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1( @@ -93,7 +93,7 @@ vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m1_i16m1(vector, scalar, vl); + return __riscv_vredxor_vs_i16m1_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1( @@ -102,7 +102,7 @@ vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m2_i16m1(vector, scalar, vl); + return __riscv_vredxor_vs_i16m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1( @@ -111,7 +111,7 @@ vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m4_i16m1(vector, scalar, vl); + return __riscv_vredxor_vs_i16m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1( @@ -120,7 +120,7 @@ vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m8_i16m1(vector, scalar, vl); + return __riscv_vredxor_vs_i16m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1( @@ -129,7 +129,7 @@ vint16m1_t 
test_vredxor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32mf2_i32m1(vector, scalar, vl); + return __riscv_vredxor_vs_i32mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1( @@ -138,7 +138,7 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m1_i32m1(vector, scalar, vl); + return __riscv_vredxor_vs_i32m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1( @@ -147,7 +147,7 @@ vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m2_i32m1(vector, scalar, vl); + return __riscv_vredxor_vs_i32m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1( @@ -156,7 +156,7 @@ vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m4_i32m1(vector, scalar, vl); + return __riscv_vredxor_vs_i32m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1( @@ -165,7 +165,7 @@ vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m8_i32m1(vector, scalar, vl); + return __riscv_vredxor_vs_i32m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1( @@ -174,7 +174,7 @@ vint32m1_t 
test_vredxor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m1_i64m1(vector, scalar, vl); + return __riscv_vredxor_vs_i64m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1( @@ -183,7 +183,7 @@ vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m2_i64m1(vector, scalar, vl); + return __riscv_vredxor_vs_i64m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1( @@ -192,7 +192,7 @@ vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m4_i64m1(vector, scalar, vl); + return __riscv_vredxor_vs_i64m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1( @@ -201,7 +201,7 @@ vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m8_i64m1(vector, scalar, vl); + return __riscv_vredxor_vs_i64m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1( @@ -210,7 +210,7 @@ vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf8_u8m1(vector, scalar, vl); + return __riscv_vredxor_vs_u8mf8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1( @@ -219,7 +219,7 @@ vuint8m1_t 
test_vredxor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf4_u8m1(vector, scalar, vl); + return __riscv_vredxor_vs_u8mf4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1( @@ -228,7 +228,7 @@ vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf2_u8m1(vector, scalar, vl); + return __riscv_vredxor_vs_u8mf2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1( @@ -237,7 +237,7 @@ vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m1_u8m1(vector, scalar, vl); + return __riscv_vredxor_vs_u8m1_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1( @@ -246,7 +246,7 @@ vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m2_u8m1(vector, scalar, vl); + return __riscv_vredxor_vs_u8m2_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1( @@ -255,7 +255,7 @@ vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m4_u8m1(vector, scalar, vl); + return __riscv_vredxor_vs_u8m4_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1( @@ -264,7 +264,7 @@ vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t 
scalar, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m8_u8m1(vector, scalar, vl); + return __riscv_vredxor_vs_u8m8_u8m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1( @@ -273,7 +273,7 @@ vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf4_u16m1(vector, scalar, vl); + return __riscv_vredxor_vs_u16mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1( @@ -282,7 +282,7 @@ vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf2_u16m1(vector, scalar, vl); + return __riscv_vredxor_vs_u16mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1( @@ -291,7 +291,7 @@ vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m1_u16m1(vector, scalar, vl); + return __riscv_vredxor_vs_u16m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1( @@ -300,7 +300,7 @@ vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m2_u16m1(vector, scalar, vl); + return __riscv_vredxor_vs_u16m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1( @@ -309,7 +309,7 @@ vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m4_u16m1(vector, scalar, vl); + return __riscv_vredxor_vs_u16m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1( @@ -318,7 +318,7 @@ vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m8_u16m1(vector, scalar, vl); + return __riscv_vredxor_vs_u16m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1( @@ -327,7 +327,7 @@ vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32mf2_u32m1(vector, scalar, vl); + return __riscv_vredxor_vs_u32mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1( @@ -336,7 +336,7 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m1_u32m1(vector, scalar, vl); + return __riscv_vredxor_vs_u32m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1( @@ -345,7 +345,7 @@ vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m2_u32m1(vector, scalar, vl); + return __riscv_vredxor_vs_u32m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1( @@ -354,7 +354,7 @@ vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m4_u32m1(vector, scalar, vl); + return __riscv_vredxor_vs_u32m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1( @@ -363,7 +363,7 @@ vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m8_u32m1(vector, scalar, vl); + return __riscv_vredxor_vs_u32m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1( @@ -372,7 +372,7 @@ vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m1_u64m1(vector, scalar, vl); + return __riscv_vredxor_vs_u64m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1( @@ -381,7 +381,7 @@ vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m2_u64m1(vector, scalar, vl); + return __riscv_vredxor_vs_u64m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1( @@ -390,7 +390,7 @@ vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m4_u64m1(vector, scalar, vl); + return __riscv_vredxor_vs_u64m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1( @@ -399,7 +399,7 @@ vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m8_u64m1(vector, scalar, vl); + return __riscv_vredxor_vs_u64m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m( @@ -408,7 +408,7 @@ vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( @@ -417,7 +417,7 @@ vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( @@ -426,7 +426,7 @@ vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m( @@ -435,7 +435,7 @@ vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m1_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i8m1_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m( @@ -444,7 +444,7 @@ vint8m1_t 
test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m2_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i8m2_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m( @@ -453,7 +453,7 @@ vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m4_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i8m4_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m( @@ -462,7 +462,7 @@ vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m8_i8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i8m8_i8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m( @@ -480,7 +480,7 @@ vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); + return 
__riscv_vredxor_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m( @@ -489,7 +489,7 @@ vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m1_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i16m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m( @@ -498,7 +498,7 @@ vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m2_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i16m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m( @@ -507,7 +507,7 @@ vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i16m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m( @@ -516,7 +516,7 @@ vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m8_i16m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i16m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m( @@ -525,7 +525,7 @@ vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m( @@ -534,7 +534,7 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m1_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i32m1_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m( @@ -543,7 +543,7 @@ vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i32m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m( @@ -552,7 +552,7 @@ vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m4_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i32m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m( @@ -561,7 +561,7 @@ vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m8_i32m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i32m8_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredxor_vs_i64m1_i64m1_m( @@ -570,7 +570,7 @@ vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m1_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i64m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m( @@ -579,7 +579,7 @@ vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m2_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i64m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m( @@ -588,7 +588,7 @@ vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m4_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i64m4_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m( @@ -597,7 +597,7 @@ vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m8_i64m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_i64m8_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m( @@ -606,7 +606,7 @@ vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - 
return vredxor_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m( @@ -615,7 +615,7 @@ vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m( @@ -624,7 +624,7 @@ vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m( @@ -633,7 +633,7 @@ vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m1_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u8m1_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m( @@ -642,7 +642,7 @@ vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m2_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u8m2_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m( @@ -651,7 +651,7 @@ vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m4_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u8m4_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m( @@ -660,7 +660,7 @@ vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m8_u8m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u8m8_u8m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m( @@ -669,7 +669,7 @@ vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m( @@ -678,7 +678,7 @@ vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m( @@ -687,7 +687,7 @@ vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m1_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u16m1_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredxor_vs_u16m2_u16m1_m( @@ -696,7 +696,7 @@ vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u16m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m( @@ -705,7 +705,7 @@ vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m4_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u16m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m( @@ -714,7 +714,7 @@ vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m8_u16m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u16m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m( @@ -723,7 +723,7 @@ vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m( @@ -732,7 +732,7 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, 
size_t vl) { - return vredxor_vs_u32m1_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u32m1_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m( @@ -741,7 +741,7 @@ vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u32m2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m( @@ -750,7 +750,7 @@ vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m4_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u32m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m( @@ -759,7 +759,7 @@ vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m8_u32m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u32m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m( @@ -768,7 +768,7 @@ vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m1_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u64m1_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m( @@ -777,7 +777,7 @@ vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t 
mask, vuint64m1_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m2_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u64m2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m( @@ -786,7 +786,7 @@ vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m4_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u64m4_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m( @@ -795,6 +795,6 @@ vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m8_u64m1_m(mask, vector, scalar, vl); + return __riscv_vredxor_vs_u64m8_u64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vreinterpret.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vreinterpret.c index c5d71fb2f0c2..f11aacd35b18 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vreinterpret.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vreinterpret.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint8mf8_t test_vreinterpret_v_i8mf8_u8mf8(vint8mf8_t src) { - return vreinterpret_v_i8mf8_u8mf8(src); + return __riscv_vreinterpret_v_i8mf8_u8mf8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_u8mf4( @@ -20,7 +20,7 @@ vuint8mf8_t test_vreinterpret_v_i8mf8_u8mf8(vint8mf8_t src) { // CHECK-RV64-NEXT: ret 
[[SRC:%.*]] // vuint8mf4_t test_vreinterpret_v_i8mf4_u8mf4(vint8mf4_t src) { - return vreinterpret_v_i8mf4_u8mf4(src); + return __riscv_vreinterpret_v_i8mf4_u8mf4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_u8mf2( @@ -28,7 +28,7 @@ vuint8mf4_t test_vreinterpret_v_i8mf4_u8mf4(vint8mf4_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint8mf2_t test_vreinterpret_v_i8mf2_u8mf2(vint8mf2_t src) { - return vreinterpret_v_i8mf2_u8mf2(src); + return __riscv_vreinterpret_v_i8mf2_u8mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_u8m1( @@ -36,7 +36,7 @@ vuint8mf2_t test_vreinterpret_v_i8mf2_u8mf2(vint8mf2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint8m1_t test_vreinterpret_v_i8m1_u8m1(vint8m1_t src) { - return vreinterpret_v_i8m1_u8m1(src); + return __riscv_vreinterpret_v_i8m1_u8m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_u8m2( @@ -44,7 +44,7 @@ vuint8m1_t test_vreinterpret_v_i8m1_u8m1(vint8m1_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint8m2_t test_vreinterpret_v_i8m2_u8m2(vint8m2_t src) { - return vreinterpret_v_i8m2_u8m2(src); + return __riscv_vreinterpret_v_i8m2_u8m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_u8m4( @@ -52,7 +52,7 @@ vuint8m2_t test_vreinterpret_v_i8m2_u8m2(vint8m2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint8m4_t test_vreinterpret_v_i8m4_u8m4(vint8m4_t src) { - return vreinterpret_v_i8m4_u8m4(src); + return __riscv_vreinterpret_v_i8m4_u8m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_u8m8( @@ -60,7 +60,7 @@ vuint8m4_t test_vreinterpret_v_i8m4_u8m4(vint8m4_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint8m8_t test_vreinterpret_v_i8m8_u8m8(vint8m8_t src) { - return vreinterpret_v_i8m8_u8m8(src); + return __riscv_vreinterpret_v_i8m8_u8m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf8_i8mf8( @@ -68,7 +68,7 @@ vuint8m8_t test_vreinterpret_v_i8m8_u8m8(vint8m8_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint8mf8_t test_vreinterpret_v_u8mf8_i8mf8(vuint8mf8_t src) 
{ - return vreinterpret_v_u8mf8_i8mf8(src); + return __riscv_vreinterpret_v_u8mf8_i8mf8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_i8mf4( @@ -76,7 +76,7 @@ vint8mf8_t test_vreinterpret_v_u8mf8_i8mf8(vuint8mf8_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint8mf4_t test_vreinterpret_v_u8mf4_i8mf4(vuint8mf4_t src) { - return vreinterpret_v_u8mf4_i8mf4(src); + return __riscv_vreinterpret_v_u8mf4_i8mf4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_i8mf2( @@ -84,7 +84,7 @@ vint8mf4_t test_vreinterpret_v_u8mf4_i8mf4(vuint8mf4_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint8mf2_t test_vreinterpret_v_u8mf2_i8mf2(vuint8mf2_t src) { - return vreinterpret_v_u8mf2_i8mf2(src); + return __riscv_vreinterpret_v_u8mf2_i8mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_i8m1( @@ -92,7 +92,7 @@ vint8mf2_t test_vreinterpret_v_u8mf2_i8mf2(vuint8mf2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint8m1_t test_vreinterpret_v_u8m1_i8m1(vuint8m1_t src) { - return vreinterpret_v_u8m1_i8m1(src); + return __riscv_vreinterpret_v_u8m1_i8m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_i8m2( @@ -100,7 +100,7 @@ vint8m1_t test_vreinterpret_v_u8m1_i8m1(vuint8m1_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint8m2_t test_vreinterpret_v_u8m2_i8m2(vuint8m2_t src) { - return vreinterpret_v_u8m2_i8m2(src); + return __riscv_vreinterpret_v_u8m2_i8m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_i8m4( @@ -108,7 +108,7 @@ vint8m2_t test_vreinterpret_v_u8m2_i8m2(vuint8m2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint8m4_t test_vreinterpret_v_u8m4_i8m4(vuint8m4_t src) { - return vreinterpret_v_u8m4_i8m4(src); + return __riscv_vreinterpret_v_u8m4_i8m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_i8m8( @@ -116,7 +116,7 @@ vint8m4_t test_vreinterpret_v_u8m4_i8m4(vuint8m4_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint8m8_t test_vreinterpret_v_u8m8_i8m8(vuint8m8_t src) { - return vreinterpret_v_u8m8_i8m8(src); + return 
__riscv_vreinterpret_v_u8m8_i8m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_f16mf4( @@ -125,7 +125,7 @@ vint8m8_t test_vreinterpret_v_u8m8_i8m8(vuint8m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vreinterpret_v_i16mf4_f16mf4(vint16mf4_t src) { - return vreinterpret_v_i16mf4_f16mf4(src); + return __riscv_vreinterpret_v_i16mf4_f16mf4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_f16mf2( @@ -134,7 +134,7 @@ vfloat16mf4_t test_vreinterpret_v_i16mf4_f16mf4(vint16mf4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vreinterpret_v_i16mf2_f16mf2(vint16mf2_t src) { - return vreinterpret_v_i16mf2_f16mf2(src); + return __riscv_vreinterpret_v_i16mf2_f16mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_f16m1( @@ -143,7 +143,7 @@ vfloat16mf2_t test_vreinterpret_v_i16mf2_f16mf2(vint16mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vreinterpret_v_i16m1_f16m1(vint16m1_t src) { - return vreinterpret_v_i16m1_f16m1(src); + return __riscv_vreinterpret_v_i16m1_f16m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_f16m2( @@ -152,7 +152,7 @@ vfloat16m1_t test_vreinterpret_v_i16m1_f16m1(vint16m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vreinterpret_v_i16m2_f16m2(vint16m2_t src) { - return vreinterpret_v_i16m2_f16m2(src); + return __riscv_vreinterpret_v_i16m2_f16m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_f16m4( @@ -161,7 +161,7 @@ vfloat16m2_t test_vreinterpret_v_i16m2_f16m2(vint16m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vreinterpret_v_i16m4_f16m4(vint16m4_t src) { - return vreinterpret_v_i16m4_f16m4(src); + return __riscv_vreinterpret_v_i16m4_f16m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_f16m8( @@ -170,7 +170,7 @@ vfloat16m4_t test_vreinterpret_v_i16m4_f16m4(vint16m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vreinterpret_v_i16m8_f16m8(vint16m8_t src) { - return vreinterpret_v_i16m8_f16m8(src); + 
return __riscv_vreinterpret_v_i16m8_f16m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_f16mf4( @@ -179,7 +179,7 @@ vfloat16m8_t test_vreinterpret_v_i16m8_f16m8(vint16m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vreinterpret_v_u16mf4_f16mf4(vuint16mf4_t src) { - return vreinterpret_v_u16mf4_f16mf4(src); + return __riscv_vreinterpret_v_u16mf4_f16mf4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_f16mf2( @@ -188,7 +188,7 @@ vfloat16mf4_t test_vreinterpret_v_u16mf4_f16mf4(vuint16mf4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vreinterpret_v_u16mf2_f16mf2(vuint16mf2_t src) { - return vreinterpret_v_u16mf2_f16mf2(src); + return __riscv_vreinterpret_v_u16mf2_f16mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_f16m1( @@ -197,7 +197,7 @@ vfloat16mf2_t test_vreinterpret_v_u16mf2_f16mf2(vuint16mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vreinterpret_v_u16m1_f16m1(vuint16m1_t src) { - return vreinterpret_v_u16m1_f16m1(src); + return __riscv_vreinterpret_v_u16m1_f16m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_f16m2( @@ -206,7 +206,7 @@ vfloat16m1_t test_vreinterpret_v_u16m1_f16m1(vuint16m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vreinterpret_v_u16m2_f16m2(vuint16m2_t src) { - return vreinterpret_v_u16m2_f16m2(src); + return __riscv_vreinterpret_v_u16m2_f16m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_f16m4( @@ -215,7 +215,7 @@ vfloat16m2_t test_vreinterpret_v_u16m2_f16m2(vuint16m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vreinterpret_v_u16m4_f16m4(vuint16m4_t src) { - return vreinterpret_v_u16m4_f16m4(src); + return __riscv_vreinterpret_v_u16m4_f16m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_f16m8( @@ -224,7 +224,7 @@ vfloat16m4_t test_vreinterpret_v_u16m4_f16m4(vuint16m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vreinterpret_v_u16m8_f16m8(vuint16m8_t src) { - return 
vreinterpret_v_u16m8_f16m8(src); + return __riscv_vreinterpret_v_u16m8_f16m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_u16mf4( @@ -232,7 +232,7 @@ vfloat16m8_t test_vreinterpret_v_u16m8_f16m8(vuint16m8_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint16mf4_t test_vreinterpret_v_i16mf4_u16mf4(vint16mf4_t src) { - return vreinterpret_v_i16mf4_u16mf4(src); + return __riscv_vreinterpret_v_i16mf4_u16mf4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_u16mf2( @@ -240,7 +240,7 @@ vuint16mf4_t test_vreinterpret_v_i16mf4_u16mf4(vint16mf4_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint16mf2_t test_vreinterpret_v_i16mf2_u16mf2(vint16mf2_t src) { - return vreinterpret_v_i16mf2_u16mf2(src); + return __riscv_vreinterpret_v_i16mf2_u16mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_u16m1( @@ -248,7 +248,7 @@ vuint16mf2_t test_vreinterpret_v_i16mf2_u16mf2(vint16mf2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint16m1_t test_vreinterpret_v_i16m1_u16m1(vint16m1_t src) { - return vreinterpret_v_i16m1_u16m1(src); + return __riscv_vreinterpret_v_i16m1_u16m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_u16m2( @@ -256,7 +256,7 @@ vuint16m1_t test_vreinterpret_v_i16m1_u16m1(vint16m1_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint16m2_t test_vreinterpret_v_i16m2_u16m2(vint16m2_t src) { - return vreinterpret_v_i16m2_u16m2(src); + return __riscv_vreinterpret_v_i16m2_u16m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_u16m4( @@ -264,7 +264,7 @@ vuint16m2_t test_vreinterpret_v_i16m2_u16m2(vint16m2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint16m4_t test_vreinterpret_v_i16m4_u16m4(vint16m4_t src) { - return vreinterpret_v_i16m4_u16m4(src); + return __riscv_vreinterpret_v_i16m4_u16m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_u16m8( @@ -272,7 +272,7 @@ vuint16m4_t test_vreinterpret_v_i16m4_u16m4(vint16m4_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint16m8_t 
test_vreinterpret_v_i16m8_u16m8(vint16m8_t src) { - return vreinterpret_v_i16m8_u16m8(src); + return __riscv_vreinterpret_v_i16m8_u16m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_i16mf4( @@ -280,7 +280,7 @@ vuint16m8_t test_vreinterpret_v_i16m8_u16m8(vint16m8_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint16mf4_t test_vreinterpret_v_u16mf4_i16mf4(vuint16mf4_t src) { - return vreinterpret_v_u16mf4_i16mf4(src); + return __riscv_vreinterpret_v_u16mf4_i16mf4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_i16mf2( @@ -288,7 +288,7 @@ vint16mf4_t test_vreinterpret_v_u16mf4_i16mf4(vuint16mf4_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint16mf2_t test_vreinterpret_v_u16mf2_i16mf2(vuint16mf2_t src) { - return vreinterpret_v_u16mf2_i16mf2(src); + return __riscv_vreinterpret_v_u16mf2_i16mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_i16m1( @@ -296,7 +296,7 @@ vint16mf2_t test_vreinterpret_v_u16mf2_i16mf2(vuint16mf2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint16m1_t test_vreinterpret_v_u16m1_i16m1(vuint16m1_t src) { - return vreinterpret_v_u16m1_i16m1(src); + return __riscv_vreinterpret_v_u16m1_i16m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_i16m2( @@ -304,7 +304,7 @@ vint16m1_t test_vreinterpret_v_u16m1_i16m1(vuint16m1_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint16m2_t test_vreinterpret_v_u16m2_i16m2(vuint16m2_t src) { - return vreinterpret_v_u16m2_i16m2(src); + return __riscv_vreinterpret_v_u16m2_i16m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_i16m4( @@ -312,7 +312,7 @@ vint16m2_t test_vreinterpret_v_u16m2_i16m2(vuint16m2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint16m4_t test_vreinterpret_v_u16m4_i16m4(vuint16m4_t src) { - return vreinterpret_v_u16m4_i16m4(src); + return __riscv_vreinterpret_v_u16m4_i16m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_i16m8( @@ -320,7 +320,7 @@ vint16m4_t test_vreinterpret_v_u16m4_i16m4(vuint16m4_t src) { // CHECK-RV64-NEXT: ret 
[[SRC:%.*]] // vint16m8_t test_vreinterpret_v_u16m8_i16m8(vuint16m8_t src) { - return vreinterpret_v_u16m8_i16m8(src); + return __riscv_vreinterpret_v_u16m8_i16m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf4_i16mf4( @@ -329,7 +329,7 @@ vint16m8_t test_vreinterpret_v_u16m8_i16m8(vuint16m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vreinterpret_v_f16mf4_i16mf4(vfloat16mf4_t src) { - return vreinterpret_v_f16mf4_i16mf4(src); + return __riscv_vreinterpret_v_f16mf4_i16mf4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf2_i16mf2( @@ -338,7 +338,7 @@ vint16mf4_t test_vreinterpret_v_f16mf4_i16mf4(vfloat16mf4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vreinterpret_v_f16mf2_i16mf2(vfloat16mf2_t src) { - return vreinterpret_v_f16mf2_i16mf2(src); + return __riscv_vreinterpret_v_f16mf2_i16mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f16m1_i16m1( @@ -347,7 +347,7 @@ vint16mf2_t test_vreinterpret_v_f16mf2_i16mf2(vfloat16mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vreinterpret_v_f16m1_i16m1(vfloat16m1_t src) { - return vreinterpret_v_f16m1_i16m1(src); + return __riscv_vreinterpret_v_f16m1_i16m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f16m2_i16m2( @@ -356,7 +356,7 @@ vint16m1_t test_vreinterpret_v_f16m1_i16m1(vfloat16m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vreinterpret_v_f16m2_i16m2(vfloat16m2_t src) { - return vreinterpret_v_f16m2_i16m2(src); + return __riscv_vreinterpret_v_f16m2_i16m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f16m4_i16m4( @@ -365,7 +365,7 @@ vint16m2_t test_vreinterpret_v_f16m2_i16m2(vfloat16m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vreinterpret_v_f16m4_i16m4(vfloat16m4_t src) { - return vreinterpret_v_f16m4_i16m4(src); + return __riscv_vreinterpret_v_f16m4_i16m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f16m8_i16m8( @@ -374,7 +374,7 @@ vint16m4_t test_vreinterpret_v_f16m4_i16m4(vfloat16m4_t src) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vreinterpret_v_f16m8_i16m8(vfloat16m8_t src) { - return vreinterpret_v_f16m8_i16m8(src); + return __riscv_vreinterpret_v_f16m8_i16m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf4_u16mf4( @@ -383,7 +383,7 @@ vint16m8_t test_vreinterpret_v_f16m8_i16m8(vfloat16m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vreinterpret_v_f16mf4_u16mf4(vfloat16mf4_t src) { - return vreinterpret_v_f16mf4_u16mf4(src); + return __riscv_vreinterpret_v_f16mf4_u16mf4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf2_u16mf2( @@ -392,7 +392,7 @@ vuint16mf4_t test_vreinterpret_v_f16mf4_u16mf4(vfloat16mf4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vreinterpret_v_f16mf2_u16mf2(vfloat16mf2_t src) { - return vreinterpret_v_f16mf2_u16mf2(src); + return __riscv_vreinterpret_v_f16mf2_u16mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f16m1_u16m1( @@ -401,7 +401,7 @@ vuint16mf2_t test_vreinterpret_v_f16mf2_u16mf2(vfloat16mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vreinterpret_v_f16m1_u16m1(vfloat16m1_t src) { - return vreinterpret_v_f16m1_u16m1(src); + return __riscv_vreinterpret_v_f16m1_u16m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f16m2_u16m2( @@ -410,7 +410,7 @@ vuint16m1_t test_vreinterpret_v_f16m1_u16m1(vfloat16m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vreinterpret_v_f16m2_u16m2(vfloat16m2_t src) { - return vreinterpret_v_f16m2_u16m2(src); + return __riscv_vreinterpret_v_f16m2_u16m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f16m4_u16m4( @@ -419,7 +419,7 @@ vuint16m2_t test_vreinterpret_v_f16m2_u16m2(vfloat16m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vreinterpret_v_f16m4_u16m4(vfloat16m4_t src) { - return vreinterpret_v_f16m4_u16m4(src); + return __riscv_vreinterpret_v_f16m4_u16m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f16m8_u16m8( @@ -428,7 +428,7 @@ vuint16m4_t 
test_vreinterpret_v_f16m4_u16m4(vfloat16m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vreinterpret_v_f16m8_u16m8(vfloat16m8_t src) { - return vreinterpret_v_f16m8_u16m8(src); + return __riscv_vreinterpret_v_f16m8_u16m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_f32mf2( @@ -437,7 +437,7 @@ vuint16m8_t test_vreinterpret_v_f16m8_u16m8(vfloat16m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vreinterpret_v_i32mf2_f32mf2(vint32mf2_t src) { - return vreinterpret_v_i32mf2_f32mf2(src); + return __riscv_vreinterpret_v_i32mf2_f32mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_f32m1( @@ -446,7 +446,7 @@ vfloat32mf2_t test_vreinterpret_v_i32mf2_f32mf2(vint32mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vreinterpret_v_i32m1_f32m1(vint32m1_t src) { - return vreinterpret_v_i32m1_f32m1(src); + return __riscv_vreinterpret_v_i32m1_f32m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_f32m2( @@ -455,7 +455,7 @@ vfloat32m1_t test_vreinterpret_v_i32m1_f32m1(vint32m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vreinterpret_v_i32m2_f32m2(vint32m2_t src) { - return vreinterpret_v_i32m2_f32m2(src); + return __riscv_vreinterpret_v_i32m2_f32m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_f32m4( @@ -464,7 +464,7 @@ vfloat32m2_t test_vreinterpret_v_i32m2_f32m2(vint32m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vreinterpret_v_i32m4_f32m4(vint32m4_t src) { - return vreinterpret_v_i32m4_f32m4(src); + return __riscv_vreinterpret_v_i32m4_f32m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_f32m8( @@ -473,7 +473,7 @@ vfloat32m4_t test_vreinterpret_v_i32m4_f32m4(vint32m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vreinterpret_v_i32m8_f32m8(vint32m8_t src) { - return vreinterpret_v_i32m8_f32m8(src); + return __riscv_vreinterpret_v_i32m8_f32m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_f32mf2( @@ -482,7 +482,7 @@ 
vfloat32m8_t test_vreinterpret_v_i32m8_f32m8(vint32m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vreinterpret_v_u32mf2_f32mf2(vuint32mf2_t src) { - return vreinterpret_v_u32mf2_f32mf2(src); + return __riscv_vreinterpret_v_u32mf2_f32mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_f32m1( @@ -491,7 +491,7 @@ vfloat32mf2_t test_vreinterpret_v_u32mf2_f32mf2(vuint32mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vreinterpret_v_u32m1_f32m1(vuint32m1_t src) { - return vreinterpret_v_u32m1_f32m1(src); + return __riscv_vreinterpret_v_u32m1_f32m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_f32m2( @@ -500,7 +500,7 @@ vfloat32m1_t test_vreinterpret_v_u32m1_f32m1(vuint32m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vreinterpret_v_u32m2_f32m2(vuint32m2_t src) { - return vreinterpret_v_u32m2_f32m2(src); + return __riscv_vreinterpret_v_u32m2_f32m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_f32m4( @@ -509,7 +509,7 @@ vfloat32m2_t test_vreinterpret_v_u32m2_f32m2(vuint32m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vreinterpret_v_u32m4_f32m4(vuint32m4_t src) { - return vreinterpret_v_u32m4_f32m4(src); + return __riscv_vreinterpret_v_u32m4_f32m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_f32m8( @@ -518,7 +518,7 @@ vfloat32m4_t test_vreinterpret_v_u32m4_f32m4(vuint32m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vreinterpret_v_u32m8_f32m8(vuint32m8_t src) { - return vreinterpret_v_u32m8_f32m8(src); + return __riscv_vreinterpret_v_u32m8_f32m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_u32mf2( @@ -526,7 +526,7 @@ vfloat32m8_t test_vreinterpret_v_u32m8_f32m8(vuint32m8_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint32mf2_t test_vreinterpret_v_i32mf2_u32mf2(vint32mf2_t src) { - return vreinterpret_v_i32mf2_u32mf2(src); + return __riscv_vreinterpret_v_i32mf2_u32mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_u32m1( 
@@ -534,7 +534,7 @@ vuint32mf2_t test_vreinterpret_v_i32mf2_u32mf2(vint32mf2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint32m1_t test_vreinterpret_v_i32m1_u32m1(vint32m1_t src) { - return vreinterpret_v_i32m1_u32m1(src); + return __riscv_vreinterpret_v_i32m1_u32m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_u32m2( @@ -542,7 +542,7 @@ vuint32m1_t test_vreinterpret_v_i32m1_u32m1(vint32m1_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint32m2_t test_vreinterpret_v_i32m2_u32m2(vint32m2_t src) { - return vreinterpret_v_i32m2_u32m2(src); + return __riscv_vreinterpret_v_i32m2_u32m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_u32m4( @@ -550,7 +550,7 @@ vuint32m2_t test_vreinterpret_v_i32m2_u32m2(vint32m2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint32m4_t test_vreinterpret_v_i32m4_u32m4(vint32m4_t src) { - return vreinterpret_v_i32m4_u32m4(src); + return __riscv_vreinterpret_v_i32m4_u32m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_u32m8( @@ -558,7 +558,7 @@ vuint32m4_t test_vreinterpret_v_i32m4_u32m4(vint32m4_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint32m8_t test_vreinterpret_v_i32m8_u32m8(vint32m8_t src) { - return vreinterpret_v_i32m8_u32m8(src); + return __riscv_vreinterpret_v_i32m8_u32m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_i32mf2( @@ -566,7 +566,7 @@ vuint32m8_t test_vreinterpret_v_i32m8_u32m8(vint32m8_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint32mf2_t test_vreinterpret_v_u32mf2_i32mf2(vuint32mf2_t src) { - return vreinterpret_v_u32mf2_i32mf2(src); + return __riscv_vreinterpret_v_u32mf2_i32mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_i32m1( @@ -574,7 +574,7 @@ vint32mf2_t test_vreinterpret_v_u32mf2_i32mf2(vuint32mf2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint32m1_t test_vreinterpret_v_u32m1_i32m1(vuint32m1_t src) { - return vreinterpret_v_u32m1_i32m1(src); + return __riscv_vreinterpret_v_u32m1_i32m1(src); } // CHECK-RV64-LABEL: 
@test_vreinterpret_v_u32m2_i32m2( @@ -582,7 +582,7 @@ vint32m1_t test_vreinterpret_v_u32m1_i32m1(vuint32m1_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint32m2_t test_vreinterpret_v_u32m2_i32m2(vuint32m2_t src) { - return vreinterpret_v_u32m2_i32m2(src); + return __riscv_vreinterpret_v_u32m2_i32m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_i32m4( @@ -590,7 +590,7 @@ vint32m2_t test_vreinterpret_v_u32m2_i32m2(vuint32m2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint32m4_t test_vreinterpret_v_u32m4_i32m4(vuint32m4_t src) { - return vreinterpret_v_u32m4_i32m4(src); + return __riscv_vreinterpret_v_u32m4_i32m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_i32m8( @@ -598,7 +598,7 @@ vint32m4_t test_vreinterpret_v_u32m4_i32m4(vuint32m4_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint32m8_t test_vreinterpret_v_u32m8_i32m8(vuint32m8_t src) { - return vreinterpret_v_u32m8_i32m8(src); + return __riscv_vreinterpret_v_u32m8_i32m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_i32mf2( @@ -607,7 +607,7 @@ vint32m8_t test_vreinterpret_v_u32m8_i32m8(vuint32m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vreinterpret_v_f32mf2_i32mf2(vfloat32mf2_t src) { - return vreinterpret_v_f32mf2_i32mf2(src); + return __riscv_vreinterpret_v_f32mf2_i32mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_i32m1( @@ -616,7 +616,7 @@ vint32mf2_t test_vreinterpret_v_f32mf2_i32mf2(vfloat32mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vreinterpret_v_f32m1_i32m1(vfloat32m1_t src) { - return vreinterpret_v_f32m1_i32m1(src); + return __riscv_vreinterpret_v_f32m1_i32m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m2_i32m2( @@ -625,7 +625,7 @@ vint32m1_t test_vreinterpret_v_f32m1_i32m1(vfloat32m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vreinterpret_v_f32m2_i32m2(vfloat32m2_t src) { - return vreinterpret_v_f32m2_i32m2(src); + return __riscv_vreinterpret_v_f32m2_i32m2(src); } // 
CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_i32m4( @@ -634,7 +634,7 @@ vint32m2_t test_vreinterpret_v_f32m2_i32m2(vfloat32m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vreinterpret_v_f32m4_i32m4(vfloat32m4_t src) { - return vreinterpret_v_f32m4_i32m4(src); + return __riscv_vreinterpret_v_f32m4_i32m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_i32m8( @@ -643,7 +643,7 @@ vint32m4_t test_vreinterpret_v_f32m4_i32m4(vfloat32m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vreinterpret_v_f32m8_i32m8(vfloat32m8_t src) { - return vreinterpret_v_f32m8_i32m8(src); + return __riscv_vreinterpret_v_f32m8_i32m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_u32mf2( @@ -652,7 +652,7 @@ vint32m8_t test_vreinterpret_v_f32m8_i32m8(vfloat32m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vreinterpret_v_f32mf2_u32mf2(vfloat32mf2_t src) { - return vreinterpret_v_f32mf2_u32mf2(src); + return __riscv_vreinterpret_v_f32mf2_u32mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_u32m1( @@ -661,7 +661,7 @@ vuint32mf2_t test_vreinterpret_v_f32mf2_u32mf2(vfloat32mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vreinterpret_v_f32m1_u32m1(vfloat32m1_t src) { - return vreinterpret_v_f32m1_u32m1(src); + return __riscv_vreinterpret_v_f32m1_u32m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m2_u32m2( @@ -670,7 +670,7 @@ vuint32m1_t test_vreinterpret_v_f32m1_u32m1(vfloat32m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vreinterpret_v_f32m2_u32m2(vfloat32m2_t src) { - return vreinterpret_v_f32m2_u32m2(src); + return __riscv_vreinterpret_v_f32m2_u32m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_u32m4( @@ -679,7 +679,7 @@ vuint32m2_t test_vreinterpret_v_f32m2_u32m2(vfloat32m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vreinterpret_v_f32m4_u32m4(vfloat32m4_t src) { - return vreinterpret_v_f32m4_u32m4(src); + return 
__riscv_vreinterpret_v_f32m4_u32m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_u32m8( @@ -688,7 +688,7 @@ vuint32m4_t test_vreinterpret_v_f32m4_u32m4(vfloat32m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vreinterpret_v_f32m8_u32m8(vfloat32m8_t src) { - return vreinterpret_v_f32m8_u32m8(src); + return __riscv_vreinterpret_v_f32m8_u32m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_f64m1( @@ -697,7 +697,7 @@ vuint32m8_t test_vreinterpret_v_f32m8_u32m8(vfloat32m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vreinterpret_v_i64m1_f64m1(vint64m1_t src) { - return vreinterpret_v_i64m1_f64m1(src); + return __riscv_vreinterpret_v_i64m1_f64m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_f64m2( @@ -706,7 +706,7 @@ vfloat64m1_t test_vreinterpret_v_i64m1_f64m1(vint64m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vreinterpret_v_i64m2_f64m2(vint64m2_t src) { - return vreinterpret_v_i64m2_f64m2(src); + return __riscv_vreinterpret_v_i64m2_f64m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_f64m4( @@ -715,7 +715,7 @@ vfloat64m2_t test_vreinterpret_v_i64m2_f64m2(vint64m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vreinterpret_v_i64m4_f64m4(vint64m4_t src) { - return vreinterpret_v_i64m4_f64m4(src); + return __riscv_vreinterpret_v_i64m4_f64m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_f64m8( @@ -724,7 +724,7 @@ vfloat64m4_t test_vreinterpret_v_i64m4_f64m4(vint64m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vreinterpret_v_i64m8_f64m8(vint64m8_t src) { - return vreinterpret_v_i64m8_f64m8(src); + return __riscv_vreinterpret_v_i64m8_f64m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_f64m1( @@ -733,7 +733,7 @@ vfloat64m8_t test_vreinterpret_v_i64m8_f64m8(vint64m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vreinterpret_v_u64m1_f64m1(vuint64m1_t src) { - return vreinterpret_v_u64m1_f64m1(src); + return 
__riscv_vreinterpret_v_u64m1_f64m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_f64m2( @@ -742,7 +742,7 @@ vfloat64m1_t test_vreinterpret_v_u64m1_f64m1(vuint64m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vreinterpret_v_u64m2_f64m2(vuint64m2_t src) { - return vreinterpret_v_u64m2_f64m2(src); + return __riscv_vreinterpret_v_u64m2_f64m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_f64m4( @@ -751,7 +751,7 @@ vfloat64m2_t test_vreinterpret_v_u64m2_f64m2(vuint64m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vreinterpret_v_u64m4_f64m4(vuint64m4_t src) { - return vreinterpret_v_u64m4_f64m4(src); + return __riscv_vreinterpret_v_u64m4_f64m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_f64m8( @@ -760,7 +760,7 @@ vfloat64m4_t test_vreinterpret_v_u64m4_f64m4(vuint64m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vreinterpret_v_u64m8_f64m8(vuint64m8_t src) { - return vreinterpret_v_u64m8_f64m8(src); + return __riscv_vreinterpret_v_u64m8_f64m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_u64m1( @@ -768,7 +768,7 @@ vfloat64m8_t test_vreinterpret_v_u64m8_f64m8(vuint64m8_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint64m1_t test_vreinterpret_v_i64m1_u64m1(vint64m1_t src) { - return vreinterpret_v_i64m1_u64m1(src); + return __riscv_vreinterpret_v_i64m1_u64m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_u64m2( @@ -776,7 +776,7 @@ vuint64m1_t test_vreinterpret_v_i64m1_u64m1(vint64m1_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint64m2_t test_vreinterpret_v_i64m2_u64m2(vint64m2_t src) { - return vreinterpret_v_i64m2_u64m2(src); + return __riscv_vreinterpret_v_i64m2_u64m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_u64m4( @@ -784,7 +784,7 @@ vuint64m2_t test_vreinterpret_v_i64m2_u64m2(vint64m2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint64m4_t test_vreinterpret_v_i64m4_u64m4(vint64m4_t src) { - return vreinterpret_v_i64m4_u64m4(src); + return 
__riscv_vreinterpret_v_i64m4_u64m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_u64m8( @@ -792,7 +792,7 @@ vuint64m4_t test_vreinterpret_v_i64m4_u64m4(vint64m4_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vuint64m8_t test_vreinterpret_v_i64m8_u64m8(vint64m8_t src) { - return vreinterpret_v_i64m8_u64m8(src); + return __riscv_vreinterpret_v_i64m8_u64m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_i64m1( @@ -800,7 +800,7 @@ vuint64m8_t test_vreinterpret_v_i64m8_u64m8(vint64m8_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint64m1_t test_vreinterpret_v_u64m1_i64m1(vuint64m1_t src) { - return vreinterpret_v_u64m1_i64m1(src); + return __riscv_vreinterpret_v_u64m1_i64m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_i64m2( @@ -808,7 +808,7 @@ vint64m1_t test_vreinterpret_v_u64m1_i64m1(vuint64m1_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint64m2_t test_vreinterpret_v_u64m2_i64m2(vuint64m2_t src) { - return vreinterpret_v_u64m2_i64m2(src); + return __riscv_vreinterpret_v_u64m2_i64m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_i64m4( @@ -816,7 +816,7 @@ vint64m2_t test_vreinterpret_v_u64m2_i64m2(vuint64m2_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint64m4_t test_vreinterpret_v_u64m4_i64m4(vuint64m4_t src) { - return vreinterpret_v_u64m4_i64m4(src); + return __riscv_vreinterpret_v_u64m4_i64m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_i64m8( @@ -824,7 +824,7 @@ vint64m4_t test_vreinterpret_v_u64m4_i64m4(vuint64m4_t src) { // CHECK-RV64-NEXT: ret [[SRC:%.*]] // vint64m8_t test_vreinterpret_v_u64m8_i64m8(vuint64m8_t src) { - return vreinterpret_v_u64m8_i64m8(src); + return __riscv_vreinterpret_v_u64m8_i64m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_i64m1( @@ -833,7 +833,7 @@ vint64m8_t test_vreinterpret_v_u64m8_i64m8(vuint64m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vreinterpret_v_f64m1_i64m1(vfloat64m1_t src) { - return vreinterpret_v_f64m1_i64m1(src); + return 
__riscv_vreinterpret_v_f64m1_i64m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m2_i64m2( @@ -842,7 +842,7 @@ vint64m1_t test_vreinterpret_v_f64m1_i64m1(vfloat64m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vreinterpret_v_f64m2_i64m2(vfloat64m2_t src) { - return vreinterpret_v_f64m2_i64m2(src); + return __riscv_vreinterpret_v_f64m2_i64m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_i64m4( @@ -851,7 +851,7 @@ vint64m2_t test_vreinterpret_v_f64m2_i64m2(vfloat64m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vreinterpret_v_f64m4_i64m4(vfloat64m4_t src) { - return vreinterpret_v_f64m4_i64m4(src); + return __riscv_vreinterpret_v_f64m4_i64m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_i64m8( @@ -860,7 +860,7 @@ vint64m4_t test_vreinterpret_v_f64m4_i64m4(vfloat64m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vreinterpret_v_f64m8_i64m8(vfloat64m8_t src) { - return vreinterpret_v_f64m8_i64m8(src); + return __riscv_vreinterpret_v_f64m8_i64m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_u64m1( @@ -869,7 +869,7 @@ vint64m8_t test_vreinterpret_v_f64m8_i64m8(vfloat64m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vreinterpret_v_f64m1_u64m1(vfloat64m1_t src) { - return vreinterpret_v_f64m1_u64m1(src); + return __riscv_vreinterpret_v_f64m1_u64m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m2_u64m2( @@ -878,7 +878,7 @@ vuint64m1_t test_vreinterpret_v_f64m1_u64m1(vfloat64m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vreinterpret_v_f64m2_u64m2(vfloat64m2_t src) { - return vreinterpret_v_f64m2_u64m2(src); + return __riscv_vreinterpret_v_f64m2_u64m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_u64m4( @@ -887,7 +887,7 @@ vuint64m2_t test_vreinterpret_v_f64m2_u64m2(vfloat64m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vreinterpret_v_f64m4_u64m4(vfloat64m4_t src) { - return vreinterpret_v_f64m4_u64m4(src); + return 
__riscv_vreinterpret_v_f64m4_u64m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_u64m8( @@ -896,7 +896,7 @@ vuint64m4_t test_vreinterpret_v_f64m4_u64m4(vfloat64m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vreinterpret_v_f64m8_u64m8(vfloat64m8_t src) { - return vreinterpret_v_f64m8_u64m8(src); + return __riscv_vreinterpret_v_f64m8_u64m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_i16mf4( @@ -905,7 +905,7 @@ vuint64m8_t test_vreinterpret_v_f64m8_u64m8(vfloat64m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vreinterpret_v_i8mf4_i16mf4(vint8mf4_t src) { - return vreinterpret_v_i8mf4_i16mf4(src); + return __riscv_vreinterpret_v_i8mf4_i16mf4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i16mf2( @@ -914,7 +914,7 @@ vint16mf4_t test_vreinterpret_v_i8mf4_i16mf4(vint8mf4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vreinterpret_v_i8mf2_i16mf2(vint8mf2_t src) { - return vreinterpret_v_i8mf2_i16mf2(src); + return __riscv_vreinterpret_v_i8mf2_i16mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i16m1( @@ -923,7 +923,7 @@ vint16mf2_t test_vreinterpret_v_i8mf2_i16mf2(vint8mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vreinterpret_v_i8m1_i16m1(vint8m1_t src) { - return vreinterpret_v_i8m1_i16m1(src); + return __riscv_vreinterpret_v_i8m1_i16m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i16m2( @@ -932,7 +932,7 @@ vint16m1_t test_vreinterpret_v_i8m1_i16m1(vint8m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vreinterpret_v_i8m2_i16m2(vint8m2_t src) { - return vreinterpret_v_i8m2_i16m2(src); + return __riscv_vreinterpret_v_i8m2_i16m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i16m4( @@ -941,7 +941,7 @@ vint16m2_t test_vreinterpret_v_i8m2_i16m2(vint8m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vreinterpret_v_i8m4_i16m4(vint8m4_t src) { - return vreinterpret_v_i8m4_i16m4(src); + return 
__riscv_vreinterpret_v_i8m4_i16m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i16m8( @@ -950,7 +950,7 @@ vint16m4_t test_vreinterpret_v_i8m4_i16m4(vint8m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vreinterpret_v_i8m8_i16m8(vint8m8_t src) { - return vreinterpret_v_i8m8_i16m8(src); + return __riscv_vreinterpret_v_i8m8_i16m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_u16mf4( @@ -959,7 +959,7 @@ vint16m8_t test_vreinterpret_v_i8m8_i16m8(vint8m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vreinterpret_v_u8mf4_u16mf4(vuint8mf4_t src) { - return vreinterpret_v_u8mf4_u16mf4(src); + return __riscv_vreinterpret_v_u8mf4_u16mf4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u16mf2( @@ -968,7 +968,7 @@ vuint16mf4_t test_vreinterpret_v_u8mf4_u16mf4(vuint8mf4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vreinterpret_v_u8mf2_u16mf2(vuint8mf2_t src) { - return vreinterpret_v_u8mf2_u16mf2(src); + return __riscv_vreinterpret_v_u8mf2_u16mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u16m1( @@ -977,7 +977,7 @@ vuint16mf2_t test_vreinterpret_v_u8mf2_u16mf2(vuint8mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vreinterpret_v_u8m1_u16m1(vuint8m1_t src) { - return vreinterpret_v_u8m1_u16m1(src); + return __riscv_vreinterpret_v_u8m1_u16m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u16m2( @@ -986,7 +986,7 @@ vuint16m1_t test_vreinterpret_v_u8m1_u16m1(vuint8m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vreinterpret_v_u8m2_u16m2(vuint8m2_t src) { - return vreinterpret_v_u8m2_u16m2(src); + return __riscv_vreinterpret_v_u8m2_u16m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u16m4( @@ -995,7 +995,7 @@ vuint16m2_t test_vreinterpret_v_u8m2_u16m2(vuint8m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vreinterpret_v_u8m4_u16m4(vuint8m4_t src) { - return vreinterpret_v_u8m4_u16m4(src); + return 
__riscv_vreinterpret_v_u8m4_u16m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u16m8( @@ -1004,7 +1004,7 @@ vuint16m4_t test_vreinterpret_v_u8m4_u16m4(vuint8m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vreinterpret_v_u8m8_u16m8(vuint8m8_t src) { - return vreinterpret_v_u8m8_u16m8(src); + return __riscv_vreinterpret_v_u8m8_u16m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i32mf2( @@ -1013,7 +1013,7 @@ vuint16m8_t test_vreinterpret_v_u8m8_u16m8(vuint8m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vreinterpret_v_i8mf2_i32mf2(vint8mf2_t src) { - return vreinterpret_v_i8mf2_i32mf2(src); + return __riscv_vreinterpret_v_i8mf2_i32mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i32m1( @@ -1022,7 +1022,7 @@ vint32mf2_t test_vreinterpret_v_i8mf2_i32mf2(vint8mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vreinterpret_v_i8m1_i32m1(vint8m1_t src) { - return vreinterpret_v_i8m1_i32m1(src); + return __riscv_vreinterpret_v_i8m1_i32m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i32m2( @@ -1031,7 +1031,7 @@ vint32m1_t test_vreinterpret_v_i8m1_i32m1(vint8m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vreinterpret_v_i8m2_i32m2(vint8m2_t src) { - return vreinterpret_v_i8m2_i32m2(src); + return __riscv_vreinterpret_v_i8m2_i32m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i32m4( @@ -1040,7 +1040,7 @@ vint32m2_t test_vreinterpret_v_i8m2_i32m2(vint8m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vreinterpret_v_i8m4_i32m4(vint8m4_t src) { - return vreinterpret_v_i8m4_i32m4(src); + return __riscv_vreinterpret_v_i8m4_i32m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i32m8( @@ -1049,7 +1049,7 @@ vint32m4_t test_vreinterpret_v_i8m4_i32m4(vint8m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vreinterpret_v_i8m8_i32m8(vint8m8_t src) { - return vreinterpret_v_i8m8_i32m8(src); + return __riscv_vreinterpret_v_i8m8_i32m8(src); } // 
CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u32mf2( @@ -1058,7 +1058,7 @@ vint32m8_t test_vreinterpret_v_i8m8_i32m8(vint8m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vreinterpret_v_u8mf2_u32mf2(vuint8mf2_t src) { - return vreinterpret_v_u8mf2_u32mf2(src); + return __riscv_vreinterpret_v_u8mf2_u32mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u32m1( @@ -1067,7 +1067,7 @@ vuint32mf2_t test_vreinterpret_v_u8mf2_u32mf2(vuint8mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vreinterpret_v_u8m1_u32m1(vuint8m1_t src) { - return vreinterpret_v_u8m1_u32m1(src); + return __riscv_vreinterpret_v_u8m1_u32m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u32m2( @@ -1076,7 +1076,7 @@ vuint32m1_t test_vreinterpret_v_u8m1_u32m1(vuint8m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vreinterpret_v_u8m2_u32m2(vuint8m2_t src) { - return vreinterpret_v_u8m2_u32m2(src); + return __riscv_vreinterpret_v_u8m2_u32m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u32m4( @@ -1085,7 +1085,7 @@ vuint32m2_t test_vreinterpret_v_u8m2_u32m2(vuint8m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vreinterpret_v_u8m4_u32m4(vuint8m4_t src) { - return vreinterpret_v_u8m4_u32m4(src); + return __riscv_vreinterpret_v_u8m4_u32m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u32m8( @@ -1094,7 +1094,7 @@ vuint32m4_t test_vreinterpret_v_u8m4_u32m4(vuint8m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vreinterpret_v_u8m8_u32m8(vuint8m8_t src) { - return vreinterpret_v_u8m8_u32m8(src); + return __riscv_vreinterpret_v_u8m8_u32m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i64m1( @@ -1103,7 +1103,7 @@ vuint32m8_t test_vreinterpret_v_u8m8_u32m8(vuint8m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vreinterpret_v_i8m1_i64m1(vint8m1_t src) { - return vreinterpret_v_i8m1_i64m1(src); + return __riscv_vreinterpret_v_i8m1_i64m1(src); } // CHECK-RV64-LABEL: 
@test_vreinterpret_v_i8m2_i64m2( @@ -1112,7 +1112,7 @@ vint64m1_t test_vreinterpret_v_i8m1_i64m1(vint8m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vreinterpret_v_i8m2_i64m2(vint8m2_t src) { - return vreinterpret_v_i8m2_i64m2(src); + return __riscv_vreinterpret_v_i8m2_i64m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i64m4( @@ -1121,7 +1121,7 @@ vint64m2_t test_vreinterpret_v_i8m2_i64m2(vint8m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vreinterpret_v_i8m4_i64m4(vint8m4_t src) { - return vreinterpret_v_i8m4_i64m4(src); + return __riscv_vreinterpret_v_i8m4_i64m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i64m8( @@ -1130,7 +1130,7 @@ vint64m4_t test_vreinterpret_v_i8m4_i64m4(vint8m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vreinterpret_v_i8m8_i64m8(vint8m8_t src) { - return vreinterpret_v_i8m8_i64m8(src); + return __riscv_vreinterpret_v_i8m8_i64m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u64m1( @@ -1139,7 +1139,7 @@ vint64m8_t test_vreinterpret_v_i8m8_i64m8(vint8m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vreinterpret_v_u8m1_u64m1(vuint8m1_t src) { - return vreinterpret_v_u8m1_u64m1(src); + return __riscv_vreinterpret_v_u8m1_u64m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u64m2( @@ -1148,7 +1148,7 @@ vuint64m1_t test_vreinterpret_v_u8m1_u64m1(vuint8m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vreinterpret_v_u8m2_u64m2(vuint8m2_t src) { - return vreinterpret_v_u8m2_u64m2(src); + return __riscv_vreinterpret_v_u8m2_u64m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u64m4( @@ -1157,7 +1157,7 @@ vuint64m2_t test_vreinterpret_v_u8m2_u64m2(vuint8m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vreinterpret_v_u8m4_u64m4(vuint8m4_t src) { - return vreinterpret_v_u8m4_u64m4(src); + return __riscv_vreinterpret_v_u8m4_u64m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u64m8( @@ -1166,7 +1166,7 @@ 
vuint64m4_t test_vreinterpret_v_u8m4_u64m4(vuint8m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vreinterpret_v_u8m8_u64m8(vuint8m8_t src) { - return vreinterpret_v_u8m8_u64m8(src); + return __riscv_vreinterpret_v_u8m8_u64m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_i8mf4( @@ -1175,7 +1175,7 @@ vuint64m8_t test_vreinterpret_v_u8m8_u64m8(vuint8m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vreinterpret_v_i16mf4_i8mf4(vint16mf4_t src) { - return vreinterpret_v_i16mf4_i8mf4(src); + return __riscv_vreinterpret_v_i16mf4_i8mf4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i8mf2( @@ -1184,7 +1184,7 @@ vint8mf4_t test_vreinterpret_v_i16mf4_i8mf4(vint16mf4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vreinterpret_v_i16mf2_i8mf2(vint16mf2_t src) { - return vreinterpret_v_i16mf2_i8mf2(src); + return __riscv_vreinterpret_v_i16mf2_i8mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i8m1( @@ -1193,7 +1193,7 @@ vint8mf2_t test_vreinterpret_v_i16mf2_i8mf2(vint16mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vreinterpret_v_i16m1_i8m1(vint16m1_t src) { - return vreinterpret_v_i16m1_i8m1(src); + return __riscv_vreinterpret_v_i16m1_i8m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i8m2( @@ -1202,7 +1202,7 @@ vint8m1_t test_vreinterpret_v_i16m1_i8m1(vint16m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vreinterpret_v_i16m2_i8m2(vint16m2_t src) { - return vreinterpret_v_i16m2_i8m2(src); + return __riscv_vreinterpret_v_i16m2_i8m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i8m4( @@ -1211,7 +1211,7 @@ vint8m2_t test_vreinterpret_v_i16m2_i8m2(vint16m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vreinterpret_v_i16m4_i8m4(vint16m4_t src) { - return vreinterpret_v_i16m4_i8m4(src); + return __riscv_vreinterpret_v_i16m4_i8m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i8m8( @@ -1220,7 +1220,7 @@ vint8m4_t 
test_vreinterpret_v_i16m4_i8m4(vint16m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vreinterpret_v_i16m8_i8m8(vint16m8_t src) { - return vreinterpret_v_i16m8_i8m8(src); + return __riscv_vreinterpret_v_i16m8_i8m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_u8mf4( @@ -1229,7 +1229,7 @@ vint8m8_t test_vreinterpret_v_i16m8_i8m8(vint16m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vreinterpret_v_u16mf4_u8mf4(vuint16mf4_t src) { - return vreinterpret_v_u16mf4_u8mf4(src); + return __riscv_vreinterpret_v_u16mf4_u8mf4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u8mf2( @@ -1238,7 +1238,7 @@ vuint8mf4_t test_vreinterpret_v_u16mf4_u8mf4(vuint16mf4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vreinterpret_v_u16mf2_u8mf2(vuint16mf2_t src) { - return vreinterpret_v_u16mf2_u8mf2(src); + return __riscv_vreinterpret_v_u16mf2_u8mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u8m1( @@ -1247,7 +1247,7 @@ vuint8mf2_t test_vreinterpret_v_u16mf2_u8mf2(vuint16mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vreinterpret_v_u16m1_u8m1(vuint16m1_t src) { - return vreinterpret_v_u16m1_u8m1(src); + return __riscv_vreinterpret_v_u16m1_u8m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u8m2( @@ -1256,7 +1256,7 @@ vuint8m1_t test_vreinterpret_v_u16m1_u8m1(vuint16m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vreinterpret_v_u16m2_u8m2(vuint16m2_t src) { - return vreinterpret_v_u16m2_u8m2(src); + return __riscv_vreinterpret_v_u16m2_u8m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u8m4( @@ -1265,7 +1265,7 @@ vuint8m2_t test_vreinterpret_v_u16m2_u8m2(vuint16m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vreinterpret_v_u16m4_u8m4(vuint16m4_t src) { - return vreinterpret_v_u16m4_u8m4(src); + return __riscv_vreinterpret_v_u16m4_u8m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u8m8( @@ -1274,7 +1274,7 @@ vuint8m4_t 
test_vreinterpret_v_u16m4_u8m4(vuint16m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vreinterpret_v_u16m8_u8m8(vuint16m8_t src) { - return vreinterpret_v_u16m8_u8m8(src); + return __riscv_vreinterpret_v_u16m8_u8m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i32mf2( @@ -1283,7 +1283,7 @@ vuint8m8_t test_vreinterpret_v_u16m8_u8m8(vuint16m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vreinterpret_v_i16mf2_i32mf2(vint16mf2_t src) { - return vreinterpret_v_i16mf2_i32mf2(src); + return __riscv_vreinterpret_v_i16mf2_i32mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i32m1( @@ -1292,7 +1292,7 @@ vint32mf2_t test_vreinterpret_v_i16mf2_i32mf2(vint16mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vreinterpret_v_i16m1_i32m1(vint16m1_t src) { - return vreinterpret_v_i16m1_i32m1(src); + return __riscv_vreinterpret_v_i16m1_i32m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i32m2( @@ -1301,7 +1301,7 @@ vint32m1_t test_vreinterpret_v_i16m1_i32m1(vint16m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vreinterpret_v_i16m2_i32m2(vint16m2_t src) { - return vreinterpret_v_i16m2_i32m2(src); + return __riscv_vreinterpret_v_i16m2_i32m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i32m4( @@ -1310,7 +1310,7 @@ vint32m2_t test_vreinterpret_v_i16m2_i32m2(vint16m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vreinterpret_v_i16m4_i32m4(vint16m4_t src) { - return vreinterpret_v_i16m4_i32m4(src); + return __riscv_vreinterpret_v_i16m4_i32m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i32m8( @@ -1319,7 +1319,7 @@ vint32m4_t test_vreinterpret_v_i16m4_i32m4(vint16m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vreinterpret_v_i16m8_i32m8(vint16m8_t src) { - return vreinterpret_v_i16m8_i32m8(src); + return __riscv_vreinterpret_v_i16m8_i32m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u32mf2( @@ -1328,7 +1328,7 @@ vint32m8_t 
test_vreinterpret_v_i16m8_i32m8(vint16m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vreinterpret_v_u16mf2_u32mf2(vuint16mf2_t src) { - return vreinterpret_v_u16mf2_u32mf2(src); + return __riscv_vreinterpret_v_u16mf2_u32mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u32m1( @@ -1337,7 +1337,7 @@ vuint32mf2_t test_vreinterpret_v_u16mf2_u32mf2(vuint16mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vreinterpret_v_u16m1_u32m1(vuint16m1_t src) { - return vreinterpret_v_u16m1_u32m1(src); + return __riscv_vreinterpret_v_u16m1_u32m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u32m2( @@ -1346,7 +1346,7 @@ vuint32m1_t test_vreinterpret_v_u16m1_u32m1(vuint16m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vreinterpret_v_u16m2_u32m2(vuint16m2_t src) { - return vreinterpret_v_u16m2_u32m2(src); + return __riscv_vreinterpret_v_u16m2_u32m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u32m4( @@ -1355,7 +1355,7 @@ vuint32m2_t test_vreinterpret_v_u16m2_u32m2(vuint16m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vreinterpret_v_u16m4_u32m4(vuint16m4_t src) { - return vreinterpret_v_u16m4_u32m4(src); + return __riscv_vreinterpret_v_u16m4_u32m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u32m8( @@ -1364,7 +1364,7 @@ vuint32m4_t test_vreinterpret_v_u16m4_u32m4(vuint16m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vreinterpret_v_u16m8_u32m8(vuint16m8_t src) { - return vreinterpret_v_u16m8_u32m8(src); + return __riscv_vreinterpret_v_u16m8_u32m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i64m1( @@ -1373,7 +1373,7 @@ vuint32m8_t test_vreinterpret_v_u16m8_u32m8(vuint16m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vreinterpret_v_i16m1_i64m1(vint16m1_t src) { - return vreinterpret_v_i16m1_i64m1(src); + return __riscv_vreinterpret_v_i16m1_i64m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i64m2( @@ -1382,7 +1382,7 @@ 
vint64m1_t test_vreinterpret_v_i16m1_i64m1(vint16m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vreinterpret_v_i16m2_i64m2(vint16m2_t src) { - return vreinterpret_v_i16m2_i64m2(src); + return __riscv_vreinterpret_v_i16m2_i64m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i64m4( @@ -1391,7 +1391,7 @@ vint64m2_t test_vreinterpret_v_i16m2_i64m2(vint16m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vreinterpret_v_i16m4_i64m4(vint16m4_t src) { - return vreinterpret_v_i16m4_i64m4(src); + return __riscv_vreinterpret_v_i16m4_i64m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i64m8( @@ -1400,7 +1400,7 @@ vint64m4_t test_vreinterpret_v_i16m4_i64m4(vint16m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vreinterpret_v_i16m8_i64m8(vint16m8_t src) { - return vreinterpret_v_i16m8_i64m8(src); + return __riscv_vreinterpret_v_i16m8_i64m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u64m1( @@ -1409,7 +1409,7 @@ vint64m8_t test_vreinterpret_v_i16m8_i64m8(vint16m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vreinterpret_v_u16m1_u64m1(vuint16m1_t src) { - return vreinterpret_v_u16m1_u64m1(src); + return __riscv_vreinterpret_v_u16m1_u64m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u64m2( @@ -1418,7 +1418,7 @@ vuint64m1_t test_vreinterpret_v_u16m1_u64m1(vuint16m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vreinterpret_v_u16m2_u64m2(vuint16m2_t src) { - return vreinterpret_v_u16m2_u64m2(src); + return __riscv_vreinterpret_v_u16m2_u64m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u64m4( @@ -1427,7 +1427,7 @@ vuint64m2_t test_vreinterpret_v_u16m2_u64m2(vuint16m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vreinterpret_v_u16m4_u64m4(vuint16m4_t src) { - return vreinterpret_v_u16m4_u64m4(src); + return __riscv_vreinterpret_v_u16m4_u64m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u64m8( @@ -1436,7 +1436,7 @@ vuint64m4_t 
test_vreinterpret_v_u16m4_u64m4(vuint16m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vreinterpret_v_u16m8_u64m8(vuint16m8_t src) { - return vreinterpret_v_u16m8_u64m8(src); + return __riscv_vreinterpret_v_u16m8_u64m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i8mf2( @@ -1445,7 +1445,7 @@ vuint64m8_t test_vreinterpret_v_u16m8_u64m8(vuint16m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vreinterpret_v_i32mf2_i8mf2(vint32mf2_t src) { - return vreinterpret_v_i32mf2_i8mf2(src); + return __riscv_vreinterpret_v_i32mf2_i8mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i8m1( @@ -1454,7 +1454,7 @@ vint8mf2_t test_vreinterpret_v_i32mf2_i8mf2(vint32mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vreinterpret_v_i32m1_i8m1(vint32m1_t src) { - return vreinterpret_v_i32m1_i8m1(src); + return __riscv_vreinterpret_v_i32m1_i8m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i8m2( @@ -1463,7 +1463,7 @@ vint8m1_t test_vreinterpret_v_i32m1_i8m1(vint32m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vreinterpret_v_i32m2_i8m2(vint32m2_t src) { - return vreinterpret_v_i32m2_i8m2(src); + return __riscv_vreinterpret_v_i32m2_i8m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i8m4( @@ -1472,7 +1472,7 @@ vint8m2_t test_vreinterpret_v_i32m2_i8m2(vint32m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vreinterpret_v_i32m4_i8m4(vint32m4_t src) { - return vreinterpret_v_i32m4_i8m4(src); + return __riscv_vreinterpret_v_i32m4_i8m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i8m8( @@ -1481,7 +1481,7 @@ vint8m4_t test_vreinterpret_v_i32m4_i8m4(vint32m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vreinterpret_v_i32m8_i8m8(vint32m8_t src) { - return vreinterpret_v_i32m8_i8m8(src); + return __riscv_vreinterpret_v_i32m8_i8m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u8mf2( @@ -1490,7 +1490,7 @@ vint8m8_t 
test_vreinterpret_v_i32m8_i8m8(vint32m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vreinterpret_v_u32mf2_u8mf2(vuint32mf2_t src) { - return vreinterpret_v_u32mf2_u8mf2(src); + return __riscv_vreinterpret_v_u32mf2_u8mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u8m1( @@ -1499,7 +1499,7 @@ vuint8mf2_t test_vreinterpret_v_u32mf2_u8mf2(vuint32mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vreinterpret_v_u32m1_u8m1(vuint32m1_t src) { - return vreinterpret_v_u32m1_u8m1(src); + return __riscv_vreinterpret_v_u32m1_u8m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u8m2( @@ -1508,7 +1508,7 @@ vuint8m1_t test_vreinterpret_v_u32m1_u8m1(vuint32m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vreinterpret_v_u32m2_u8m2(vuint32m2_t src) { - return vreinterpret_v_u32m2_u8m2(src); + return __riscv_vreinterpret_v_u32m2_u8m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u8m4( @@ -1517,7 +1517,7 @@ vuint8m2_t test_vreinterpret_v_u32m2_u8m2(vuint32m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vreinterpret_v_u32m4_u8m4(vuint32m4_t src) { - return vreinterpret_v_u32m4_u8m4(src); + return __riscv_vreinterpret_v_u32m4_u8m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u8m8( @@ -1526,7 +1526,7 @@ vuint8m4_t test_vreinterpret_v_u32m4_u8m4(vuint32m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vreinterpret_v_u32m8_u8m8(vuint32m8_t src) { - return vreinterpret_v_u32m8_u8m8(src); + return __riscv_vreinterpret_v_u32m8_u8m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i16mf2( @@ -1535,7 +1535,7 @@ vuint8m8_t test_vreinterpret_v_u32m8_u8m8(vuint32m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vreinterpret_v_i32mf2_i16mf2(vint32mf2_t src) { - return vreinterpret_v_i32mf2_i16mf2(src); + return __riscv_vreinterpret_v_i32mf2_i16mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i16m1( @@ -1544,7 +1544,7 @@ vint16mf2_t 
test_vreinterpret_v_i32mf2_i16mf2(vint32mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vreinterpret_v_i32m1_i16m1(vint32m1_t src) { - return vreinterpret_v_i32m1_i16m1(src); + return __riscv_vreinterpret_v_i32m1_i16m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i16m2( @@ -1553,7 +1553,7 @@ vint16m1_t test_vreinterpret_v_i32m1_i16m1(vint32m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vreinterpret_v_i32m2_i16m2(vint32m2_t src) { - return vreinterpret_v_i32m2_i16m2(src); + return __riscv_vreinterpret_v_i32m2_i16m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i16m4( @@ -1562,7 +1562,7 @@ vint16m2_t test_vreinterpret_v_i32m2_i16m2(vint32m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vreinterpret_v_i32m4_i16m4(vint32m4_t src) { - return vreinterpret_v_i32m4_i16m4(src); + return __riscv_vreinterpret_v_i32m4_i16m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i16m8( @@ -1571,7 +1571,7 @@ vint16m4_t test_vreinterpret_v_i32m4_i16m4(vint32m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vreinterpret_v_i32m8_i16m8(vint32m8_t src) { - return vreinterpret_v_i32m8_i16m8(src); + return __riscv_vreinterpret_v_i32m8_i16m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u16mf2( @@ -1580,7 +1580,7 @@ vint16m8_t test_vreinterpret_v_i32m8_i16m8(vint32m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vreinterpret_v_u32mf2_u16mf2(vuint32mf2_t src) { - return vreinterpret_v_u32mf2_u16mf2(src); + return __riscv_vreinterpret_v_u32mf2_u16mf2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u16m1( @@ -1589,7 +1589,7 @@ vuint16mf2_t test_vreinterpret_v_u32mf2_u16mf2(vuint32mf2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vreinterpret_v_u32m1_u16m1(vuint32m1_t src) { - return vreinterpret_v_u32m1_u16m1(src); + return __riscv_vreinterpret_v_u32m1_u16m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u16m2( @@ -1598,7 +1598,7 @@ vuint16m1_t 
test_vreinterpret_v_u32m1_u16m1(vuint32m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vreinterpret_v_u32m2_u16m2(vuint32m2_t src) { - return vreinterpret_v_u32m2_u16m2(src); + return __riscv_vreinterpret_v_u32m2_u16m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u16m4( @@ -1607,7 +1607,7 @@ vuint16m2_t test_vreinterpret_v_u32m2_u16m2(vuint32m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vreinterpret_v_u32m4_u16m4(vuint32m4_t src) { - return vreinterpret_v_u32m4_u16m4(src); + return __riscv_vreinterpret_v_u32m4_u16m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u16m8( @@ -1616,7 +1616,7 @@ vuint16m4_t test_vreinterpret_v_u32m4_u16m4(vuint32m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vreinterpret_v_u32m8_u16m8(vuint32m8_t src) { - return vreinterpret_v_u32m8_u16m8(src); + return __riscv_vreinterpret_v_u32m8_u16m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i64m1( @@ -1625,7 +1625,7 @@ vuint16m8_t test_vreinterpret_v_u32m8_u16m8(vuint32m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vreinterpret_v_i32m1_i64m1(vint32m1_t src) { - return vreinterpret_v_i32m1_i64m1(src); + return __riscv_vreinterpret_v_i32m1_i64m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i64m2( @@ -1634,7 +1634,7 @@ vint64m1_t test_vreinterpret_v_i32m1_i64m1(vint32m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vreinterpret_v_i32m2_i64m2(vint32m2_t src) { - return vreinterpret_v_i32m2_i64m2(src); + return __riscv_vreinterpret_v_i32m2_i64m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i64m4( @@ -1643,7 +1643,7 @@ vint64m2_t test_vreinterpret_v_i32m2_i64m2(vint32m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vreinterpret_v_i32m4_i64m4(vint32m4_t src) { - return vreinterpret_v_i32m4_i64m4(src); + return __riscv_vreinterpret_v_i32m4_i64m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i64m8( @@ -1652,7 +1652,7 @@ vint64m4_t 
test_vreinterpret_v_i32m4_i64m4(vint32m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vreinterpret_v_i32m8_i64m8(vint32m8_t src) { - return vreinterpret_v_i32m8_i64m8(src); + return __riscv_vreinterpret_v_i32m8_i64m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u64m1( @@ -1661,7 +1661,7 @@ vint64m8_t test_vreinterpret_v_i32m8_i64m8(vint32m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vreinterpret_v_u32m1_u64m1(vuint32m1_t src) { - return vreinterpret_v_u32m1_u64m1(src); + return __riscv_vreinterpret_v_u32m1_u64m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u64m2( @@ -1670,7 +1670,7 @@ vuint64m1_t test_vreinterpret_v_u32m1_u64m1(vuint32m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vreinterpret_v_u32m2_u64m2(vuint32m2_t src) { - return vreinterpret_v_u32m2_u64m2(src); + return __riscv_vreinterpret_v_u32m2_u64m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u64m4( @@ -1679,7 +1679,7 @@ vuint64m2_t test_vreinterpret_v_u32m2_u64m2(vuint32m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vreinterpret_v_u32m4_u64m4(vuint32m4_t src) { - return vreinterpret_v_u32m4_u64m4(src); + return __riscv_vreinterpret_v_u32m4_u64m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u64m8( @@ -1688,7 +1688,7 @@ vuint64m4_t test_vreinterpret_v_u32m4_u64m4(vuint32m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vreinterpret_v_u32m8_u64m8(vuint32m8_t src) { - return vreinterpret_v_u32m8_u64m8(src); + return __riscv_vreinterpret_v_u32m8_u64m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i8m1( @@ -1697,7 +1697,7 @@ vuint64m8_t test_vreinterpret_v_u32m8_u64m8(vuint32m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vreinterpret_v_i64m1_i8m1(vint64m1_t src) { - return vreinterpret_v_i64m1_i8m1(src); + return __riscv_vreinterpret_v_i64m1_i8m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i8m2( @@ -1706,7 +1706,7 @@ vint8m1_t 
test_vreinterpret_v_i64m1_i8m1(vint64m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vreinterpret_v_i64m2_i8m2(vint64m2_t src) { - return vreinterpret_v_i64m2_i8m2(src); + return __riscv_vreinterpret_v_i64m2_i8m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i8m4( @@ -1715,7 +1715,7 @@ vint8m2_t test_vreinterpret_v_i64m2_i8m2(vint64m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vreinterpret_v_i64m4_i8m4(vint64m4_t src) { - return vreinterpret_v_i64m4_i8m4(src); + return __riscv_vreinterpret_v_i64m4_i8m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i8m8( @@ -1724,7 +1724,7 @@ vint8m4_t test_vreinterpret_v_i64m4_i8m4(vint64m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vreinterpret_v_i64m8_i8m8(vint64m8_t src) { - return vreinterpret_v_i64m8_i8m8(src); + return __riscv_vreinterpret_v_i64m8_i8m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u8m1( @@ -1733,7 +1733,7 @@ vint8m8_t test_vreinterpret_v_i64m8_i8m8(vint64m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vreinterpret_v_u64m1_u8m1(vuint64m1_t src) { - return vreinterpret_v_u64m1_u8m1(src); + return __riscv_vreinterpret_v_u64m1_u8m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u8m2( @@ -1742,7 +1742,7 @@ vuint8m1_t test_vreinterpret_v_u64m1_u8m1(vuint64m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vreinterpret_v_u64m2_u8m2(vuint64m2_t src) { - return vreinterpret_v_u64m2_u8m2(src); + return __riscv_vreinterpret_v_u64m2_u8m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u8m4( @@ -1751,7 +1751,7 @@ vuint8m2_t test_vreinterpret_v_u64m2_u8m2(vuint64m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vreinterpret_v_u64m4_u8m4(vuint64m4_t src) { - return vreinterpret_v_u64m4_u8m4(src); + return __riscv_vreinterpret_v_u64m4_u8m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u8m8( @@ -1760,7 +1760,7 @@ vuint8m4_t test_vreinterpret_v_u64m4_u8m4(vuint64m4_t src) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vreinterpret_v_u64m8_u8m8(vuint64m8_t src) { - return vreinterpret_v_u64m8_u8m8(src); + return __riscv_vreinterpret_v_u64m8_u8m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i16m1( @@ -1769,7 +1769,7 @@ vuint8m8_t test_vreinterpret_v_u64m8_u8m8(vuint64m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vreinterpret_v_i64m1_i16m1(vint64m1_t src) { - return vreinterpret_v_i64m1_i16m1(src); + return __riscv_vreinterpret_v_i64m1_i16m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i16m2( @@ -1778,7 +1778,7 @@ vint16m1_t test_vreinterpret_v_i64m1_i16m1(vint64m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vreinterpret_v_i64m2_i16m2(vint64m2_t src) { - return vreinterpret_v_i64m2_i16m2(src); + return __riscv_vreinterpret_v_i64m2_i16m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i16m4( @@ -1787,7 +1787,7 @@ vint16m2_t test_vreinterpret_v_i64m2_i16m2(vint64m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vreinterpret_v_i64m4_i16m4(vint64m4_t src) { - return vreinterpret_v_i64m4_i16m4(src); + return __riscv_vreinterpret_v_i64m4_i16m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i16m8( @@ -1796,7 +1796,7 @@ vint16m4_t test_vreinterpret_v_i64m4_i16m4(vint64m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vreinterpret_v_i64m8_i16m8(vint64m8_t src) { - return vreinterpret_v_i64m8_i16m8(src); + return __riscv_vreinterpret_v_i64m8_i16m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u16m1( @@ -1805,7 +1805,7 @@ vint16m8_t test_vreinterpret_v_i64m8_i16m8(vint64m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vreinterpret_v_u64m1_u16m1(vuint64m1_t src) { - return vreinterpret_v_u64m1_u16m1(src); + return __riscv_vreinterpret_v_u64m1_u16m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u16m2( @@ -1814,7 +1814,7 @@ vuint16m1_t test_vreinterpret_v_u64m1_u16m1(vuint64m1_t src) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m2_t test_vreinterpret_v_u64m2_u16m2(vuint64m2_t src) { - return vreinterpret_v_u64m2_u16m2(src); + return __riscv_vreinterpret_v_u64m2_u16m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u16m4( @@ -1823,7 +1823,7 @@ vuint16m2_t test_vreinterpret_v_u64m2_u16m2(vuint64m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vreinterpret_v_u64m4_u16m4(vuint64m4_t src) { - return vreinterpret_v_u64m4_u16m4(src); + return __riscv_vreinterpret_v_u64m4_u16m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u16m8( @@ -1832,7 +1832,7 @@ vuint16m4_t test_vreinterpret_v_u64m4_u16m4(vuint64m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vreinterpret_v_u64m8_u16m8(vuint64m8_t src) { - return vreinterpret_v_u64m8_u16m8(src); + return __riscv_vreinterpret_v_u64m8_u16m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i32m1( @@ -1841,7 +1841,7 @@ vuint16m8_t test_vreinterpret_v_u64m8_u16m8(vuint64m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vreinterpret_v_i64m1_i32m1(vint64m1_t src) { - return vreinterpret_v_i64m1_i32m1(src); + return __riscv_vreinterpret_v_i64m1_i32m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i32m2( @@ -1850,7 +1850,7 @@ vint32m1_t test_vreinterpret_v_i64m1_i32m1(vint64m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vreinterpret_v_i64m2_i32m2(vint64m2_t src) { - return vreinterpret_v_i64m2_i32m2(src); + return __riscv_vreinterpret_v_i64m2_i32m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i32m4( @@ -1859,7 +1859,7 @@ vint32m2_t test_vreinterpret_v_i64m2_i32m2(vint64m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vreinterpret_v_i64m4_i32m4(vint64m4_t src) { - return vreinterpret_v_i64m4_i32m4(src); + return __riscv_vreinterpret_v_i64m4_i32m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i32m8( @@ -1868,7 +1868,7 @@ vint32m4_t test_vreinterpret_v_i64m4_i32m4(vint64m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m8_t test_vreinterpret_v_i64m8_i32m8(vint64m8_t src) { - return vreinterpret_v_i64m8_i32m8(src); + return __riscv_vreinterpret_v_i64m8_i32m8(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u32m1( @@ -1877,7 +1877,7 @@ vint32m8_t test_vreinterpret_v_i64m8_i32m8(vint64m8_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vreinterpret_v_u64m1_u32m1(vuint64m1_t src) { - return vreinterpret_v_u64m1_u32m1(src); + return __riscv_vreinterpret_v_u64m1_u32m1(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u32m2( @@ -1886,7 +1886,7 @@ vuint32m1_t test_vreinterpret_v_u64m1_u32m1(vuint64m1_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vreinterpret_v_u64m2_u32m2(vuint64m2_t src) { - return vreinterpret_v_u64m2_u32m2(src); + return __riscv_vreinterpret_v_u64m2_u32m2(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u32m4( @@ -1895,7 +1895,7 @@ vuint32m2_t test_vreinterpret_v_u64m2_u32m2(vuint64m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vreinterpret_v_u64m4_u32m4(vuint64m4_t src) { - return vreinterpret_v_u64m4_u32m4(src); + return __riscv_vreinterpret_v_u64m4_u32m4(src); } // CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u32m8( @@ -1904,6 +1904,6 @@ vuint32m4_t test_vreinterpret_v_u64m4_u32m4(vuint64m4_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vreinterpret_v_u64m8_u32m8(vuint64m8_t src) { - return vreinterpret_v_u64m8_u32m8(src); + return __riscv_vreinterpret_v_u64m8_u32m8(src); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrem.c index 0be216c542f1..6456979897d3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrem.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrem.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, 
vint8mf8_t op2, size_t vl) { - return vrem_vv_i8mf8(op1, op2, vl); + return __riscv_vrem_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf8(op1, op2, vl); + return __riscv_vrem_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vrem_vv_i8mf4(op1, op2, vl); + return __riscv_vrem_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf4(op1, op2, vl); + return __riscv_vrem_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vrem_vv_i8mf2(op1, op2, vl); + return __riscv_vrem_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf2(op1, op2, vl); + return __riscv_vrem_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - 
return vrem_vv_i8m1(op1, op2, vl); + return __riscv_vrem_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m1(op1, op2, vl); + return __riscv_vrem_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vrem_vv_i8m2(op1, op2, vl); + return __riscv_vrem_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m2(op1, op2, vl); + return __riscv_vrem_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vrem_vv_i8m4(op1, op2, vl); + return __riscv_vrem_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m4(op1, op2, vl); + return __riscv_vrem_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vrem_vv_i8m8(op1, op2, vl); + return __riscv_vrem_vv_i8m8(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vrem_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m8(op1, op2, vl); + return __riscv_vrem_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vrem_vv_i16mf4(op1, op2, vl); + return __riscv_vrem_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf4(op1, op2, vl); + return __riscv_vrem_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vrem_vv_i16mf2(op1, op2, vl); + return __riscv_vrem_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf2(op1, op2, vl); + return __riscv_vrem_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vrem_vv_i16m1(op1, op2, vl); + return __riscv_vrem_vv_i16m1(op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m1(op1, op2, vl); + return __riscv_vrem_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vrem_vv_i16m2(op1, op2, vl); + return __riscv_vrem_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m2(op1, op2, vl); + return __riscv_vrem_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vrem_vv_i16m4(op1, op2, vl); + return __riscv_vrem_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m4(op1, op2, vl); + return __riscv_vrem_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vrem_vv_i16m8(op1, op2, vl); + return __riscv_vrem_vv_i16m8(op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vrem_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m8(op1, op2, vl); + return __riscv_vrem_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vrem_vv_i32mf2(op1, op2, vl); + return __riscv_vrem_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32mf2(op1, op2, vl); + return __riscv_vrem_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vrem_vv_i32m1(op1, op2, vl); + return __riscv_vrem_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m1(op1, op2, vl); + return __riscv_vrem_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vrem_vv_i32m2(op1, op2, vl); + return __riscv_vrem_vv_i32m2(op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m2(op1, op2, vl); + return __riscv_vrem_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vrem_vv_i32m4(op1, op2, vl); + return __riscv_vrem_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m4(op1, op2, vl); + return __riscv_vrem_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vrem_vv_i32m8(op1, op2, vl); + return __riscv_vrem_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m8(op1, op2, vl); + return __riscv_vrem_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vrem_vv_i64m1(op1, op2, vl); + return __riscv_vrem_vv_i64m1(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vrem_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m1(op1, op2, vl); + return __riscv_vrem_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vrem_vv_i64m2(op1, op2, vl); + return __riscv_vrem_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m2(op1, op2, vl); + return __riscv_vrem_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vrem_vv_i64m4(op1, op2, vl); + return __riscv_vrem_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m4(op1, op2, vl); + return __riscv_vrem_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vrem_vv_i64m8(op1, op2, vl); + return __riscv_vrem_vv_i64m8(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vrem_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m8(op1, op2, vl); + return __riscv_vrem_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m( @@ -408,7 +408,7 @@ vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vrem_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m( @@ -426,7 +426,7 @@ vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vrem_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m( @@ -435,7 +435,7 @@ vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m( @@ -444,7 +444,7 @@ vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vrem_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m( @@ -453,7 +453,7 @@ vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m( @@ -462,7 +462,7 @@ vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vrem_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m( @@ -480,7 +480,7 @@ vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vrem_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m( @@ -489,7 +489,7 @@ vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m2_m(mask, op1, op2, vl); + return 
__riscv_vrem_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m( @@ -498,7 +498,7 @@ vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vrem_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m( @@ -507,7 +507,7 @@ vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m( @@ -516,7 +516,7 @@ vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vrem_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m( @@ -525,7 +525,7 @@ vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m( @@ -534,7 +534,7 @@ vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vrem_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m( @@ -543,7 +543,7 @@ vint16mf4_t 
test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m( @@ -552,7 +552,7 @@ vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vrem_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m( @@ -561,7 +561,7 @@ vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m( @@ -570,7 +570,7 @@ vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vrem_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m( @@ -579,7 +579,7 @@ vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m( @@ -588,7 +588,7 @@ vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vrem_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m( @@ -597,7 +597,7 @@ vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m( @@ -606,7 +606,7 @@ vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vrem_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m( @@ -615,7 +615,7 @@ vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m( @@ -624,7 +624,7 @@ vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vrem_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m( @@ -633,7 +633,7 @@ vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m8_m(mask, op1, op2, 
vl); + return __riscv_vrem_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m( @@ -642,7 +642,7 @@ vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vrem_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m( @@ -651,7 +651,7 @@ vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m( @@ -660,7 +660,7 @@ vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vrem_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m( @@ -669,7 +669,7 @@ vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m( @@ -678,7 +678,7 @@ vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vrem_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m( @@ -687,7 
+687,7 @@ vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m( @@ -696,7 +696,7 @@ vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vrem_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m( @@ -705,7 +705,7 @@ vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m( @@ -714,7 +714,7 @@ vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vrem_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m( @@ -723,7 +723,7 @@ vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m( @@ -732,7 +732,7 @@ vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vrem_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m( @@ -741,7 +741,7 @@ vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m( @@ -750,7 +750,7 @@ vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vrem_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m( @@ -759,7 +759,7 @@ vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m( @@ -768,7 +768,7 @@ vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vrem_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m( @@ -777,7 +777,7 @@ vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m4_m(mask, op1, 
op2, vl); + return __riscv_vrem_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m( @@ -786,7 +786,7 @@ vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vrem_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vrem_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m( @@ -795,6 +795,6 @@ vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vrem_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vremu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vremu.c index 2058af63bc5c..b90ecc7826a6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vremu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vremu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vremu_vv_u8mf8(op1, op2, vl); + return __riscv_vremu_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf8(op1, op2, vl); + return __riscv_vremu_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vremu_vv_u8mf4(op1, op2, vl); + return __riscv_vremu_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4( @@ -39,7 +39,7 @@ vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf4(op1, op2, vl); + return __riscv_vremu_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2( @@ -48,7 +48,7 @@ vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vremu_vv_u8mf2(op1, op2, vl); + return __riscv_vremu_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf2(op1, op2, vl); + return __riscv_vremu_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vremu_vv_u8m1(op1, op2, vl); + return __riscv_vremu_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m1(op1, op2, vl); + return __riscv_vremu_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m2( @@ -84,7 +84,7 @@ vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vremu_vv_u8m2(op1, op2, vl); + return __riscv_vremu_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m2(op1, op2, vl); + return __riscv_vremu_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vremu_vv_u8m4(op1, op2, vl); + return __riscv_vremu_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m4( @@ -111,7 +111,7 @@ vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m4(op1, op2, vl); + return __riscv_vremu_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m8( @@ -120,7 +120,7 @@ vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vremu_vv_u8m8(op1, op2, vl); + return __riscv_vremu_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m8( @@ -129,7 +129,7 @@ vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m8(op1, op2, vl); + return __riscv_vremu_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4( @@ -138,7 +138,7 @@ vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vremu_vv_u16mf4(op1, op2, vl); + return __riscv_vremu_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4( @@ -147,7 +147,7 @@ vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf4(op1, op2, vl); + return __riscv_vremu_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2( @@ -156,7 +156,7 @@ vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vremu_vv_u16mf2(op1, op2, vl); + return __riscv_vremu_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2( @@ -165,7 +165,7 @@ vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf2(op1, op2, vl); + return __riscv_vremu_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m1( @@ -174,7 +174,7 @@ vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vremu_vv_u16m1(op1, op2, vl); + return __riscv_vremu_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m1( @@ -183,7 +183,7 @@ vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m1(op1, op2, vl); + return __riscv_vremu_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m2( @@ -192,7 +192,7 @@ vuint16m1_t 
test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vremu_vv_u16m2(op1, op2, vl); + return __riscv_vremu_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m2( @@ -201,7 +201,7 @@ vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m2(op1, op2, vl); + return __riscv_vremu_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m4( @@ -210,7 +210,7 @@ vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vremu_vv_u16m4(op1, op2, vl); + return __riscv_vremu_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m4( @@ -219,7 +219,7 @@ vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m4(op1, op2, vl); + return __riscv_vremu_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m8( @@ -228,7 +228,7 @@ vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vremu_vv_u16m8(op1, op2, vl); + return __riscv_vremu_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m8( @@ -237,7 +237,7 @@ vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m8(op1, op2, vl); + return __riscv_vremu_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vremu_vv_u32mf2( @@ -246,7 +246,7 @@ vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vremu_vv_u32mf2(op1, op2, vl); + return __riscv_vremu_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32mf2(op1, op2, vl); + return __riscv_vremu_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vremu_vv_u32m1(op1, op2, vl); + return __riscv_vremu_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m1(op1, op2, vl); + return __riscv_vremu_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m2( @@ -282,7 +282,7 @@ vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vremu_vv_u32m2(op1, op2, vl); + return __riscv_vremu_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m2( @@ -291,7 +291,7 @@ vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m2(op1, op2, vl); 
+ return __riscv_vremu_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m4( @@ -300,7 +300,7 @@ vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vremu_vv_u32m4(op1, op2, vl); + return __riscv_vremu_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m4( @@ -309,7 +309,7 @@ vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m4(op1, op2, vl); + return __riscv_vremu_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m8( @@ -318,7 +318,7 @@ vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vremu_vv_u32m8(op1, op2, vl); + return __riscv_vremu_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m8( @@ -327,7 +327,7 @@ vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m8(op1, op2, vl); + return __riscv_vremu_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m1( @@ -336,7 +336,7 @@ vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vremu_vv_u64m1(op1, op2, vl); + return __riscv_vremu_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m1( @@ -345,7 +345,7 @@ vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, 
size_t vl) { - return vremu_vx_u64m1(op1, op2, vl); + return __riscv_vremu_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m2( @@ -354,7 +354,7 @@ vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vremu_vv_u64m2(op1, op2, vl); + return __riscv_vremu_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m2( @@ -363,7 +363,7 @@ vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m2(op1, op2, vl); + return __riscv_vremu_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m4( @@ -372,7 +372,7 @@ vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vremu_vv_u64m4(op1, op2, vl); + return __riscv_vremu_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m4( @@ -381,7 +381,7 @@ vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m4(op1, op2, vl); + return __riscv_vremu_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m8( @@ -390,7 +390,7 @@ vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vremu_vv_u64m8(op1, op2, vl); + return __riscv_vremu_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m8(op1, op2, vl); + return __riscv_vremu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vremu_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m( @@ -417,7 +417,7 @@ vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m( @@ -426,7 +426,7 @@ vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vremu_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m( @@ -435,7 +435,7 @@ vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m( @@ -444,7 +444,7 @@ vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vremu_vv_u8mf2_m(mask, op1, 
op2, vl); + return __riscv_vremu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m( @@ -453,7 +453,7 @@ vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m( @@ -462,7 +462,7 @@ vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vremu_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m( @@ -471,7 +471,7 @@ vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m( @@ -480,7 +480,7 @@ vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vremu_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m( @@ -489,7 +489,7 @@ vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m( @@ -498,7 +498,7 
@@ vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vremu_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m( @@ -507,7 +507,7 @@ vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m( @@ -516,7 +516,7 @@ vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vremu_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m( @@ -525,7 +525,7 @@ vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m( @@ -534,7 +534,7 @@ vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vremu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m( @@ -543,7 +543,7 @@ vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m( @@ -552,7 +552,7 @@ vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vremu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m( @@ -561,7 +561,7 @@ vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m( @@ -570,7 +570,7 @@ vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vremu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m( @@ -579,7 +579,7 @@ vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m( @@ -588,7 +588,7 @@ vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t 
op1, vuint16m2_t op2, size_t vl) { - return vremu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m( @@ -597,7 +597,7 @@ vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m( @@ -606,7 +606,7 @@ vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vremu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m( @@ -615,7 +615,7 @@ vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m( @@ -624,7 +624,7 @@ vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vremu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m( @@ -633,7 +633,7 @@ vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m8_m(mask, op1, op2, vl); + return 
__riscv_vremu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m( @@ -642,7 +642,7 @@ vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vremu_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m( @@ -651,7 +651,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m( @@ -660,7 +660,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vremu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m( @@ -669,7 +669,7 @@ vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m( @@ -678,7 +678,7 @@ vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vremu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vremu_vx_u32m2_m( @@ -687,7 +687,7 @@ vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m( @@ -696,7 +696,7 @@ vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vremu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m( @@ -705,7 +705,7 @@ vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m( @@ -714,7 +714,7 @@ vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vremu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m( @@ -723,7 +723,7 @@ vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m( @@ -732,7 +732,7 @@ vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, 
vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vremu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m( @@ -741,7 +741,7 @@ vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m( @@ -750,7 +750,7 @@ vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vremu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m( @@ -759,7 +759,7 @@ vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m( @@ -768,7 +768,7 @@ vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vremu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m( @@ -777,7 +777,7 @@ vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m( @@ -786,7 +786,7 @@ vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vremu_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vremu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vremu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c index bedd2b8cd2c3..031cd1bc4089 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_f16mf4(op1, index, vl); + return __riscv_vrgather_vv_f16mf4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t index, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vx_f16mf4(vfloat16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf4(op1, index, vl); + return __riscv_vrgather_vx_f16mf4(op1, index, vl); } // 
CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4(vfloat16mf4_t op1, size_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_f16mf2(op1, index, vl); + return __riscv_vrgather_vv_f16mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t index, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vx_f16mf2(vfloat16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf2(op1, index, vl); + return __riscv_vrgather_vx_f16mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m1( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2(vfloat16mf2_t op1, size_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vv_f16m1(vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_f16m1(op1, index, vl); + return __riscv_vrgather_vv_f16m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m1( @@ -58,7 +58,7 @@ vfloat16m1_t test_vrgather_vv_f16m1(vfloat16m1_t op1, vuint16m1_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vx_f16m1(vfloat16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m1(op1, index, vl); + return __riscv_vrgather_vx_f16m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m2( @@ -67,7 +67,7 @@ vfloat16m1_t test_vrgather_vx_f16m1(vfloat16m1_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vv_f16m2(vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_f16m2(op1, index, vl); + return __riscv_vrgather_vv_f16m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m2( @@ -76,7 +76,7 @@ vfloat16m2_t test_vrgather_vv_f16m2(vfloat16m2_t op1, vuint16m2_t index, size_t // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m2_t test_vrgather_vx_f16m2(vfloat16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m2(op1, index, vl); + return __riscv_vrgather_vx_f16m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m4( @@ -85,7 +85,7 @@ vfloat16m2_t test_vrgather_vx_f16m2(vfloat16m2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vv_f16m4(vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_f16m4(op1, index, vl); + return __riscv_vrgather_vv_f16m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m4( @@ -94,7 +94,7 @@ vfloat16m4_t test_vrgather_vv_f16m4(vfloat16m4_t op1, vuint16m4_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vx_f16m4(vfloat16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m4(op1, index, vl); + return __riscv_vrgather_vx_f16m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m8( @@ -103,7 +103,7 @@ vfloat16m4_t test_vrgather_vx_f16m4(vfloat16m4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vv_f16m8(vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_f16m8(op1, index, vl); + return __riscv_vrgather_vv_f16m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m8( @@ -112,7 +112,7 @@ vfloat16m8_t test_vrgather_vv_f16m8(vfloat16m8_t op1, vuint16m8_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vx_f16m8(vfloat16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m8(op1, index, vl); + return __riscv_vrgather_vx_f16m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2( @@ -121,7 +121,7 @@ vfloat16m8_t test_vrgather_vx_f16m8(vfloat16m8_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_f32mf2(op1, index, vl); + return 
__riscv_vrgather_vv_f32mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32mf2(op1, index, vl); + return __riscv_vrgather_vx_f32mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_f32m1(op1, index, vl); + return __riscv_vrgather_vv_f32m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1( @@ -148,7 +148,7 @@ vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m1(op1, index, vl); + return __riscv_vrgather_vx_f32m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2( @@ -157,7 +157,7 @@ vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_f32m2(op1, index, vl); + return __riscv_vrgather_vv_f32m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2( @@ -166,7 +166,7 @@ vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m2(op1, index, vl); + return __riscv_vrgather_vx_f32m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4( @@ -175,7 +175,7 @@ vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t 
index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_f32m4(op1, index, vl); + return __riscv_vrgather_vv_f32m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4( @@ -184,7 +184,7 @@ vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m4(op1, index, vl); + return __riscv_vrgather_vx_f32m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8( @@ -193,7 +193,7 @@ vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_f32m8(op1, index, vl); + return __riscv_vrgather_vv_f32m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8( @@ -202,7 +202,7 @@ vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m8(op1, index, vl); + return __riscv_vrgather_vx_f32m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1( @@ -211,7 +211,7 @@ vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_f64m1(op1, index, vl); + return __riscv_vrgather_vv_f64m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1( @@ -220,7 +220,7 @@ vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m1(op1, index, 
vl); + return __riscv_vrgather_vx_f64m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2( @@ -229,7 +229,7 @@ vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_f64m2(op1, index, vl); + return __riscv_vrgather_vv_f64m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2( @@ -238,7 +238,7 @@ vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m2(op1, index, vl); + return __riscv_vrgather_vx_f64m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4( @@ -247,7 +247,7 @@ vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_f64m4(op1, index, vl); + return __riscv_vrgather_vv_f64m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4( @@ -256,7 +256,7 @@ vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m4(op1, index, vl); + return __riscv_vrgather_vx_f64m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8( @@ -265,7 +265,7 @@ vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_f64m8(op1, index, vl); + return __riscv_vrgather_vv_f64m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8( @@ -274,7 +274,7 @@ vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t 
op1, vuint64m8_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m8(op1, index, vl); + return __riscv_vrgather_vx_f64m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8( @@ -283,7 +283,7 @@ vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather_vv_i8mf8(op1, index, vl); + return __riscv_vrgather_vv_i8mf8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8( @@ -292,7 +292,7 @@ vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf8(op1, index, vl); + return __riscv_vrgather_vx_i8mf8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4( @@ -301,7 +301,7 @@ vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_i8mf4(op1, index, vl); + return __riscv_vrgather_vv_i8mf4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4( @@ -310,7 +310,7 @@ vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf4(op1, index, vl); + return __riscv_vrgather_vx_i8mf4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2( @@ -319,7 +319,7 @@ vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather_vv_i8mf2(op1, index, vl); + return 
__riscv_vrgather_vv_i8mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2( @@ -328,7 +328,7 @@ vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf2(op1, index, vl); + return __riscv_vrgather_vx_i8mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1( @@ -337,7 +337,7 @@ vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather_vv_i8m1(op1, index, vl); + return __riscv_vrgather_vv_i8m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1( @@ -346,7 +346,7 @@ vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m1(op1, index, vl); + return __riscv_vrgather_vx_i8m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2( @@ -355,7 +355,7 @@ vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_i8m2(op1, index, vl); + return __riscv_vrgather_vv_i8m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2( @@ -364,7 +364,7 @@ vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m2(op1, index, vl); + return __riscv_vrgather_vx_i8m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4( @@ -373,7 +373,7 @@ vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t 
test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_i8m4(op1, index, vl); + return __riscv_vrgather_vv_i8m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4( @@ -382,7 +382,7 @@ vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m4(op1, index, vl); + return __riscv_vrgather_vx_i8m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8( @@ -391,7 +391,7 @@ vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_i8m8(op1, index, vl); + return __riscv_vrgather_vv_i8m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8( @@ -400,7 +400,7 @@ vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m8(op1, index, vl); + return __riscv_vrgather_vx_i8m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4( @@ -409,7 +409,7 @@ vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_i16mf4(op1, index, vl); + return __riscv_vrgather_vv_i16mf4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4( @@ -418,7 +418,7 @@ vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf4(op1, index, vl); + return __riscv_vrgather_vx_i16mf4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2( @@ -427,7 
+427,7 @@ vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_i16mf2(op1, index, vl); + return __riscv_vrgather_vv_i16mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2( @@ -436,7 +436,7 @@ vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf2(op1, index, vl); + return __riscv_vrgather_vx_i16mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1( @@ -445,7 +445,7 @@ vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_i16m1(op1, index, vl); + return __riscv_vrgather_vv_i16m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1( @@ -454,7 +454,7 @@ vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m1(op1, index, vl); + return __riscv_vrgather_vx_i16m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2( @@ -463,7 +463,7 @@ vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_i16m2(op1, index, vl); + return __riscv_vrgather_vv_i16m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2( @@ -472,7 +472,7 @@ vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, 
size_t vl) { - return vrgather_vx_i16m2(op1, index, vl); + return __riscv_vrgather_vx_i16m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4( @@ -481,7 +481,7 @@ vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_i16m4(op1, index, vl); + return __riscv_vrgather_vv_i16m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4( @@ -490,7 +490,7 @@ vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m4(op1, index, vl); + return __riscv_vrgather_vx_i16m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8( @@ -499,7 +499,7 @@ vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_i16m8(op1, index, vl); + return __riscv_vrgather_vv_i16m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8( @@ -508,7 +508,7 @@ vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m8(op1, index, vl); + return __riscv_vrgather_vx_i16m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2( @@ -517,7 +517,7 @@ vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_i32mf2(op1, index, vl); + return __riscv_vrgather_vv_i32mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2( @@ -526,7 +526,7 @@ vint32mf2_t 
test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32mf2(op1, index, vl); + return __riscv_vrgather_vx_i32mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1( @@ -535,7 +535,7 @@ vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_i32m1(op1, index, vl); + return __riscv_vrgather_vv_i32m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1( @@ -544,7 +544,7 @@ vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m1(op1, index, vl); + return __riscv_vrgather_vx_i32m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2( @@ -553,7 +553,7 @@ vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_i32m2(op1, index, vl); + return __riscv_vrgather_vv_i32m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2( @@ -562,7 +562,7 @@ vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m2(op1, index, vl); + return __riscv_vrgather_vx_i32m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4( @@ -571,7 +571,7 @@ vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, size_t vl) { - return 
vrgather_vv_i32m4(op1, index, vl); + return __riscv_vrgather_vv_i32m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4( @@ -580,7 +580,7 @@ vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m4(op1, index, vl); + return __riscv_vrgather_vx_i32m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8( @@ -589,7 +589,7 @@ vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_i32m8(op1, index, vl); + return __riscv_vrgather_vv_i32m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8( @@ -598,7 +598,7 @@ vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m8(op1, index, vl); + return __riscv_vrgather_vx_i32m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1( @@ -607,7 +607,7 @@ vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_i64m1(op1, index, vl); + return __riscv_vrgather_vv_i64m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1( @@ -616,7 +616,7 @@ vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m1(op1, index, vl); + return __riscv_vrgather_vx_i64m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2( @@ -625,7 +625,7 @@ vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, 
size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_i64m2(op1, index, vl); + return __riscv_vrgather_vv_i64m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2( @@ -634,7 +634,7 @@ vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m2(op1, index, vl); + return __riscv_vrgather_vx_i64m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4( @@ -643,7 +643,7 @@ vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_i64m4(op1, index, vl); + return __riscv_vrgather_vv_i64m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4( @@ -652,7 +652,7 @@ vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m4(op1, index, vl); + return __riscv_vrgather_vx_i64m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8( @@ -661,7 +661,7 @@ vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_i64m8(op1, index, vl); + return __riscv_vrgather_vv_i64m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8( @@ -670,7 +670,7 @@ vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m8(op1, index, vl); + return 
__riscv_vrgather_vx_i64m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8( @@ -679,7 +679,7 @@ vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather_vv_u8mf8(op1, index, vl); + return __riscv_vrgather_vv_u8mf8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8( @@ -688,7 +688,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf8(op1, index, vl); + return __riscv_vrgather_vx_u8mf8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4( @@ -697,7 +697,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_u8mf4(op1, index, vl); + return __riscv_vrgather_vv_u8mf4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4( @@ -706,7 +706,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf4(op1, index, vl); + return __riscv_vrgather_vx_u8mf4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2( @@ -715,7 +715,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather_vv_u8mf2(op1, index, vl); + return __riscv_vrgather_vv_u8mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2( @@ -724,7 +724,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, size_t vl 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf2(op1, index, vl); + return __riscv_vrgather_vx_u8mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1( @@ -733,7 +733,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather_vv_u8m1(op1, index, vl); + return __riscv_vrgather_vv_u8m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1( @@ -742,7 +742,7 @@ vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m1(op1, index, vl); + return __riscv_vrgather_vx_u8m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2( @@ -751,7 +751,7 @@ vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_u8m2(op1, index, vl); + return __riscv_vrgather_vv_u8m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2( @@ -760,7 +760,7 @@ vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m2(op1, index, vl); + return __riscv_vrgather_vx_u8m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4( @@ -769,7 +769,7 @@ vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_u8m4(op1, index, vl); + return __riscv_vrgather_vv_u8m4(op1, index, vl); } // 
CHECK-RV64-LABEL: @test_vrgather_vx_u8m4( @@ -778,7 +778,7 @@ vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m4(op1, index, vl); + return __riscv_vrgather_vx_u8m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8( @@ -787,7 +787,7 @@ vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_u8m8(op1, index, vl); + return __riscv_vrgather_vv_u8m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8( @@ -796,7 +796,7 @@ vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m8(op1, index, vl); + return __riscv_vrgather_vx_u8m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4( @@ -805,7 +805,7 @@ vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_u16mf4(op1, index, vl); + return __riscv_vrgather_vv_u16mf4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4( @@ -814,7 +814,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf4(op1, index, vl); + return __riscv_vrgather_vx_u16mf4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2( @@ -823,7 +823,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_u16mf2(op1, index, vl); + return __riscv_vrgather_vv_u16mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2( @@ -832,7 +832,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf2(op1, index, vl); + return __riscv_vrgather_vx_u16mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1( @@ -841,7 +841,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_u16m1(op1, index, vl); + return __riscv_vrgather_vv_u16m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1( @@ -850,7 +850,7 @@ vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m1(op1, index, vl); + return __riscv_vrgather_vx_u16m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2( @@ -859,7 +859,7 @@ vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_u16m2(op1, index, vl); + return __riscv_vrgather_vv_u16m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2( @@ -868,7 +868,7 @@ vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m2(op1, index, vl); + return __riscv_vrgather_vx_u16m2(op1, index, vl); } // 
CHECK-RV64-LABEL: @test_vrgather_vv_u16m4( @@ -877,7 +877,7 @@ vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_u16m4(op1, index, vl); + return __riscv_vrgather_vv_u16m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4( @@ -886,7 +886,7 @@ vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m4(op1, index, vl); + return __riscv_vrgather_vx_u16m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8( @@ -895,7 +895,7 @@ vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_u16m8(op1, index, vl); + return __riscv_vrgather_vv_u16m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8( @@ -904,7 +904,7 @@ vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m8(op1, index, vl); + return __riscv_vrgather_vx_u16m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2( @@ -913,7 +913,7 @@ vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_u32mf2(op1, index, vl); + return __riscv_vrgather_vv_u32mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2( @@ -922,7 +922,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32mf2(op1, index, vl); + return __riscv_vrgather_vx_u32mf2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1( @@ -931,7 +931,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_u32m1(op1, index, vl); + return __riscv_vrgather_vv_u32m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1( @@ -940,7 +940,7 @@ vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m1(op1, index, vl); + return __riscv_vrgather_vx_u32m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2( @@ -949,7 +949,7 @@ vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_u32m2(op1, index, vl); + return __riscv_vrgather_vv_u32m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2( @@ -958,7 +958,7 @@ vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m2(op1, index, vl); + return __riscv_vrgather_vx_u32m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4( @@ -967,7 +967,7 @@ vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_u32m4(op1, index, vl); + return __riscv_vrgather_vv_u32m4(op1, index, vl); } // 
CHECK-RV64-LABEL: @test_vrgather_vx_u32m4( @@ -976,7 +976,7 @@ vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m4(op1, index, vl); + return __riscv_vrgather_vx_u32m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8( @@ -985,7 +985,7 @@ vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_u32m8(op1, index, vl); + return __riscv_vrgather_vv_u32m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8( @@ -994,7 +994,7 @@ vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m8(op1, index, vl); + return __riscv_vrgather_vx_u32m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1( @@ -1003,7 +1003,7 @@ vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_u64m1(op1, index, vl); + return __riscv_vrgather_vv_u64m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1( @@ -1012,7 +1012,7 @@ vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m1(op1, index, vl); + return __riscv_vrgather_vx_u64m1(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2( @@ -1021,7 +1021,7 @@ vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_u64m2(op1, index, vl); + return __riscv_vrgather_vv_u64m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2( @@ -1030,7 +1030,7 @@ vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m2(op1, index, vl); + return __riscv_vrgather_vx_u64m2(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4( @@ -1039,7 +1039,7 @@ vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_u64m4(op1, index, vl); + return __riscv_vrgather_vv_u64m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4( @@ -1048,7 +1048,7 @@ vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m4(op1, index, vl); + return __riscv_vrgather_vx_u64m4(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8( @@ -1057,7 +1057,7 @@ vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_u64m8(op1, index, vl); + return __riscv_vrgather_vv_u64m8(op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8( @@ -1066,7 +1066,7 @@ vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m8(op1, index, vl); + return __riscv_vrgather_vx_u64m8(op1, index, vl); } 
// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_m( @@ -1075,7 +1075,7 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_f16mf4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f16mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_m( @@ -1084,7 +1084,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f16mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_m( @@ -1093,7 +1093,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_f16mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f16mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_m( @@ -1102,7 +1102,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f16mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_m( @@ -1111,7 +1111,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_f16m1_m(mask, op1, index, vl); + return 
__riscv_vrgather_vv_f16m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_m( @@ -1120,7 +1120,7 @@ vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m1_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f16m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_m( @@ -1129,7 +1129,7 @@ vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t i // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_f16m2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f16m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_m( @@ -1138,7 +1138,7 @@ vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f16m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_m( @@ -1147,7 +1147,7 @@ vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t in // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_f16m4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f16m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_m( @@ -1156,7 +1156,7 @@ vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m4_m(mask, op1, index, vl); + 
return __riscv_vrgather_vx_f16m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_m( @@ -1165,7 +1165,7 @@ vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t in // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_f16m8_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f16m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_m( @@ -1174,7 +1174,7 @@ vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m8_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f16m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_m( @@ -1183,7 +1183,7 @@ vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t in // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_f32mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f32mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_m( @@ -1192,7 +1192,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f32mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_m( @@ -1201,7 +1201,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return 
vrgather_vv_f32m1_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f32m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_m( @@ -1210,7 +1210,7 @@ vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m1_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f32m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_m( @@ -1219,7 +1219,7 @@ vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t i // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_f32m2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f32m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_m( @@ -1228,7 +1228,7 @@ vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f32m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_m( @@ -1237,7 +1237,7 @@ vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t i // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_f32m4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f32m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_m( @@ -1246,7 +1246,7 @@ vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t index, size_t vl) { 
- return vrgather_vx_f32m4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f32m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_m( @@ -1255,7 +1255,7 @@ vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t in // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_f32m8_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f32m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_m( @@ -1264,7 +1264,7 @@ vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m8_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f32m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_m( @@ -1273,7 +1273,7 @@ vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t in // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_f64m1_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f64m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_m( @@ -1282,7 +1282,7 @@ vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m1_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f64m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_m( @@ -1291,7 +1291,7 @@ vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t i // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint64m2_t index, 
size_t vl) { - return vrgather_vv_f64m2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f64m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_m( @@ -1300,7 +1300,7 @@ vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f64m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_m( @@ -1309,7 +1309,7 @@ vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t i // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_f64m4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f64m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_m( @@ -1318,7 +1318,7 @@ vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f64m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_m( @@ -1327,7 +1327,7 @@ vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t i // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_f64m8_m(mask, op1, index, vl); + return __riscv_vrgather_vv_f64m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_m( @@ -1336,7 +1336,7 @@ vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t op1, 
size_t index, size_t vl) { - return vrgather_vx_f64m8_m(mask, op1, index, vl); + return __riscv_vrgather_vx_f64m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_m( @@ -1345,7 +1345,7 @@ vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather_vv_i8mf8_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i8mf8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_m( @@ -1354,7 +1354,7 @@ vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf8_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i8mf8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_m( @@ -1363,7 +1363,7 @@ vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t index // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_i8mf4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i8mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_m( @@ -1372,7 +1372,7 @@ vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i8mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_m( @@ -1381,7 +1381,7 @@ vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t index // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t 
index, size_t vl) { - return vrgather_vv_i8mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i8mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_m( @@ -1390,7 +1390,7 @@ vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i8mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_m( @@ -1399,7 +1399,7 @@ vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t index // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather_vv_i8m1_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i8m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_m( @@ -1408,7 +1408,7 @@ vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t index // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m1_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i8m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_m( @@ -1417,7 +1417,7 @@ vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t index, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_i8m2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i8m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_m( @@ -1426,7 +1426,7 @@ vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t index // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t index, size_t vl) { - return 
vrgather_vx_i8m2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i8m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_m( @@ -1435,7 +1435,7 @@ vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t index, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_i8m4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i8m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_m( @@ -1444,7 +1444,7 @@ vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t index // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i8m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_m( @@ -1453,7 +1453,7 @@ vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t index, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_i8m8_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i8m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_m( @@ -1462,7 +1462,7 @@ vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t index // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m8_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i8m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_m( @@ -1471,7 +1471,7 @@ vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t index, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_i16mf4_m(mask, op1, 
index, vl); + return __riscv_vrgather_vv_i16mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_m( @@ -1480,7 +1480,7 @@ vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i16mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_m( @@ -1489,7 +1489,7 @@ vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_i16mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i16mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_m( @@ -1498,7 +1498,7 @@ vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i16mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_m( @@ -1507,7 +1507,7 @@ vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_i16m1_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i16m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_m( @@ -1516,7 +1516,7 @@ vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t index, size_t vl) { - return 
vrgather_vx_i16m1_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i16m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_m( @@ -1525,7 +1525,7 @@ vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t index // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_i16m2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i16m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_m( @@ -1534,7 +1534,7 @@ vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i16m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_m( @@ -1543,7 +1543,7 @@ vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t index, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_i16m4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i16m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_m( @@ -1552,7 +1552,7 @@ vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i16m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_m( @@ -1561,7 +1561,7 @@ vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t index, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return 
vrgather_vv_i16m8_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i16m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_m( @@ -1570,7 +1570,7 @@ vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m8_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i16m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_m( @@ -1579,7 +1579,7 @@ vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t index, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_i32mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i32mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_m( @@ -1588,7 +1588,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i32mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_m( @@ -1597,7 +1597,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_i32m1_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i32m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_m( @@ -1606,7 +1606,7 @@ vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t index, size_t vl) { - 
return vrgather_vx_i32m1_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i32m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_m( @@ -1615,7 +1615,7 @@ vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t index // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_i32m2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i32m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_m( @@ -1624,7 +1624,7 @@ vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i32m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_m( @@ -1633,7 +1633,7 @@ vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t index // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_i32m4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i32m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_m( @@ -1642,7 +1642,7 @@ vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i32m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_m( @@ -1651,7 +1651,7 @@ vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t index, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return 
vrgather_vv_i32m8_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i32m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_m( @@ -1660,7 +1660,7 @@ vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m8_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i32m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_m( @@ -1669,7 +1669,7 @@ vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t index, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_i64m1_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i64m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_m( @@ -1678,7 +1678,7 @@ vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m1_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i64m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_m( @@ -1687,7 +1687,7 @@ vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t index // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_i64m2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i64m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_m( @@ -1696,7 +1696,7 @@ vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t index, size_t vl) { - return 
vrgather_vx_i64m2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i64m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_m( @@ -1705,7 +1705,7 @@ vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t index // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_i64m4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i64m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_m( @@ -1714,7 +1714,7 @@ vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i64m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_m( @@ -1723,7 +1723,7 @@ vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t index // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_i64m8_m(mask, op1, index, vl); + return __riscv_vrgather_vv_i64m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_m( @@ -1732,7 +1732,7 @@ vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m8_m(mask, op1, index, vl); + return __riscv_vrgather_vx_i64m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_m( @@ -1741,7 +1741,7 @@ vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t index, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return 
vrgather_vv_u8mf8_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u8mf8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_m( @@ -1750,7 +1750,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf8_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u8mf8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_m( @@ -1759,7 +1759,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t ind // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_u8mf4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u8mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_m( @@ -1768,7 +1768,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u8mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_m( @@ -1777,7 +1777,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t ind // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather_vv_u8mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u8mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_m( @@ -1786,7 +1786,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t index, size_t vl) { - return 
vrgather_vx_u8mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u8mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_m( @@ -1795,7 +1795,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t ind // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather_vv_u8m1_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u8m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_m( @@ -1804,7 +1804,7 @@ vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t ind // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m1_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u8m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_m( @@ -1813,7 +1813,7 @@ vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t index, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_u8m2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u8m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_m( @@ -1822,7 +1822,7 @@ vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t ind // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u8m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_m( @@ -1831,7 +1831,7 @@ vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t index, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_u8m4_m(mask, op1, 
index, vl); + return __riscv_vrgather_vv_u8m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_m( @@ -1840,7 +1840,7 @@ vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t ind // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u8m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_m( @@ -1849,7 +1849,7 @@ vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t index, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_u8m8_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u8m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_m( @@ -1858,7 +1858,7 @@ vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t ind // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m8_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u8m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_m( @@ -1867,7 +1867,7 @@ vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t index, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_u16mf4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u16mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_m( @@ -1876,7 +1876,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf4_m(mask, op1, index, vl); 
+ return __riscv_vrgather_vx_u16mf4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_m( @@ -1885,7 +1885,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_u16mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u16mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_m( @@ -1894,7 +1894,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u16mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_m( @@ -1903,7 +1903,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_u16m1_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u16m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_m( @@ -1912,7 +1912,7 @@ vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m1_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u16m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_m( @@ -1921,7 +1921,7 @@ vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t ind // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_u16m2_m(mask, 
op1, index, vl); + return __riscv_vrgather_vv_u16m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_m( @@ -1930,7 +1930,7 @@ vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u16m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_m( @@ -1939,7 +1939,7 @@ vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t inde // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_u16m4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u16m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_m( @@ -1948,7 +1948,7 @@ vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u16m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_m( @@ -1957,7 +1957,7 @@ vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t inde // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_u16m8_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u16m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_m( @@ -1966,7 +1966,7 @@ vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m8_m(mask, op1, 
index, vl); + return __riscv_vrgather_vx_u16m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_m( @@ -1975,7 +1975,7 @@ vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t inde // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_u32mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u32mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_m( @@ -1984,7 +1984,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32mf2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u32mf2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_m( @@ -1993,7 +1993,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_u32m1_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u32m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_m( @@ -2002,7 +2002,7 @@ vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m1_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u32m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_m( @@ -2011,7 +2011,7 @@ vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t ind // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return 
vrgather_vv_u32m2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u32m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_m( @@ -2020,7 +2020,7 @@ vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u32m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_m( @@ -2029,7 +2029,7 @@ vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t ind // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_u32m4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u32m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_m( @@ -2038,7 +2038,7 @@ vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u32m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_m( @@ -2047,7 +2047,7 @@ vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t inde // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_u32m8_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u32m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_m( @@ -2056,7 +2056,7 @@ vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t index, size_t vl) { - return 
vrgather_vx_u32m8_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u32m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_m( @@ -2065,7 +2065,7 @@ vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t inde // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_u64m1_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u64m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_m( @@ -2074,7 +2074,7 @@ vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m1_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u64m1_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_m( @@ -2083,7 +2083,7 @@ vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t ind // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_u64m2_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u64m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_m( @@ -2092,7 +2092,7 @@ vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m2_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u64m2_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_m( @@ -2101,7 +2101,7 @@ vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t ind // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t index, size_t vl) { - 
return vrgather_vv_u64m4_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u64m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_m( @@ -2110,7 +2110,7 @@ vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m4_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u64m4_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_m( @@ -2119,7 +2119,7 @@ vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t ind // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_u64m8_m(mask, op1, index, vl); + return __riscv_vrgather_vv_u64m8_m(mask, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_m( @@ -2128,6 +2128,6 @@ vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m8_m(mask, op1, index, vl); + return __riscv_vrgather_vx_u64m8_m(mask, op1, index, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgatherei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgatherei16.c index 4ff4c3b18d32..0bee3bbea871 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgatherei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgatherei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgatherei16_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f16mf4(op1, op2, vl); + return __riscv_vrgatherei16_vv_f16mf4(op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgatherei16_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f16mf2(op1, op2, vl); + return __riscv_vrgatherei16_vv_f16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgatherei16_vv_f16m1(vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f16m1(op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1(vfloat16m1_t op1, vuint16m1_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgatherei16_vv_f16m2(vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f16m2(op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2(vfloat16m2_t op1, vuint16m2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgatherei16_vv_f16m4(vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f16m4(op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4(vfloat16m4_t op1, vuint16m4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgatherei16_vv_f16m8(vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_f16m8(op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t 
test_vrgatherei16_vv_f16m8(vfloat16m8_t op1, vuint16m8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f32mf2(op1, op2, vl); + return __riscv_vrgatherei16_vv_f32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f32m1(op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f32m2(op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f32m4(op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f32m8(op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f64m1(op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f64m2(op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f64m4(op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f64m8(op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8( @@ -148,7 +148,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i8mf8(op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4( @@ -157,7 +157,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return 
vrgatherei16_vv_i8mf4(op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2( @@ -166,7 +166,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i8mf2(op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1( @@ -175,7 +175,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i8m1(op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2( @@ -184,7 +184,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i8m2(op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4( @@ -193,7 +193,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i8m4(op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4( @@ -202,7 +202,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i16mf4(op1, op2, vl); + return __riscv_vrgatherei16_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2( 
@@ -211,7 +211,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i16mf2(op1, op2, vl); + return __riscv_vrgatherei16_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1( @@ -220,7 +220,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i16m1(op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2( @@ -229,7 +229,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i16m2(op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4( @@ -238,7 +238,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i16m4(op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8( @@ -247,7 +247,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i16m8(op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2( @@ -256,7 +256,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i32mf2(op1, op2, vl); + return __riscv_vrgatherei16_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1( @@ -265,7 +265,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i32m1(op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2( @@ -274,7 +274,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i32m2(op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4( @@ -283,7 +283,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i32m4(op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8( @@ -292,7 +292,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i32m8(op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1( @@ -301,7 +301,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return 
vrgatherei16_vv_i64m1(op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2( @@ -310,7 +310,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i64m2(op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4( @@ -319,7 +319,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i64m4(op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8( @@ -328,7 +328,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i64m8(op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8( @@ -337,7 +337,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u8mf8(op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4( @@ -346,7 +346,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u8mf4(op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_u8mf2( @@ -355,7 +355,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u8mf2(op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1( @@ -364,7 +364,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u8m1(op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2( @@ -373,7 +373,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u8m2(op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4( @@ -382,7 +382,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u8m4(op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4( @@ -391,7 +391,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u16mf4(op1, op2, vl); + return __riscv_vrgatherei16_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2( @@ -400,7 +400,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, siz // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u16mf2(op1, op2, vl); + return __riscv_vrgatherei16_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1( @@ -409,7 +409,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u16m1(op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2( @@ -418,7 +418,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u16m2(op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4( @@ -427,7 +427,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u16m4(op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8( @@ -436,7 +436,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u16m8(op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2( @@ -445,7 +445,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) 
{ - return vrgatherei16_vv_u32mf2(op1, op2, vl); + return __riscv_vrgatherei16_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1( @@ -454,7 +454,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u32m1(op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2( @@ -463,7 +463,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u32m2(op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4( @@ -472,7 +472,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u32m4(op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8( @@ -481,7 +481,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u32m8(op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1( @@ -490,7 +490,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u64m1(op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m1(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2( @@ -499,7 +499,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u64m2(op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4( @@ -508,7 +508,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u64m4(op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8( @@ -517,7 +517,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u64m8(op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_m( @@ -526,7 +526,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f16mf4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_m( @@ -535,7 +535,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f16mf2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_f16m1_m( @@ -544,7 +544,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgatherei16_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f16m1_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_m( @@ -553,7 +553,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgatherei16_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f16m2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_m( @@ -562,7 +562,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgatherei16_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f16m4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_m( @@ -571,7 +571,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgatherei16_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_f16m8_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_m( @@ -580,7 +580,7 @@ vfloat16m8_t test_vrgatherei16_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f32mf2_m(mask, op1, op2, vl); + return 
__riscv_vrgatherei16_vv_f32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_m( @@ -589,7 +589,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f32m1_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_m( @@ -598,7 +598,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f32m2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_m( @@ -607,7 +607,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f32m4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_m( @@ -616,7 +616,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f32m8_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_m( @@ -625,7 +625,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint16mf4_t op2, size_t 
vl) { - return vrgatherei16_vv_f64m1_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_m( @@ -634,7 +634,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f64m2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_m( @@ -643,7 +643,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f64m4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_m( @@ -652,7 +652,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f64m8_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_m( @@ -661,7 +661,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_m( @@ -670,7 +670,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t 
test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_m( @@ -679,7 +679,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_m( @@ -688,7 +688,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_m( @@ -697,7 +697,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_m( @@ -706,7 +706,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_m( @@ -715,7 +715,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint16m8_t // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_m( @@ -724,7 +724,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_m( @@ -733,7 +733,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_m( @@ -742,7 +742,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_m( @@ -751,7 +751,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_m( @@ -760,7 +760,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t 
mask, vint16m4_t op1, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_m( @@ -769,7 +769,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_m( @@ -778,7 +778,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_m( @@ -787,7 +787,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_m( @@ -796,7 +796,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_m( @@ -805,7 +805,7 
@@ vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_m( @@ -814,7 +814,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_m( @@ -823,7 +823,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_m( @@ -832,7 +832,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_m( @@ -841,7 +841,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_u8mf8_m( @@ -850,7 +850,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_m( @@ -859,7 +859,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_m( @@ -868,7 +868,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_m( @@ -877,7 +877,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_m( @@ -886,7 +886,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u8m2_m(mask, op1, op2, vl); + return 
__riscv_vrgatherei16_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_m( @@ -895,7 +895,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_m( @@ -904,7 +904,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_m( @@ -913,7 +913,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_m( @@ -922,7 +922,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_m( @@ -931,7 +931,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - 
return vrgatherei16_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_m( @@ -940,7 +940,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_m( @@ -949,7 +949,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_m( @@ -958,7 +958,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_m( @@ -967,7 +967,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_m( @@ -976,7 +976,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_m( @@ -985,7 +985,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_m( @@ -994,7 +994,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_m( @@ -1003,7 +1003,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_m( @@ -1012,7 +1012,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_m( @@ -1021,7 +1021,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, 
vuint64m2_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_m( @@ -1030,6 +1030,6 @@ vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrsub.c index b807a5163b11..494ac7a30218 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrsub.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf8(op1, op2, vl); + return __riscv_vrsub_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4( @@ -21,7 +21,7 @@ vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf4(op1, op2, vl); + return __riscv_vrsub_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2( @@ -30,7 +30,7 @@ vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf2(op1, op2, vl); + return __riscv_vrsub_vx_i8mf2(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vrsub_vx_i8m1( @@ -39,7 +39,7 @@ vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m1(op1, op2, vl); + return __riscv_vrsub_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2( @@ -48,7 +48,7 @@ vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m2(op1, op2, vl); + return __riscv_vrsub_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4( @@ -57,7 +57,7 @@ vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m4(op1, op2, vl); + return __riscv_vrsub_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8( @@ -66,7 +66,7 @@ vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m8(op1, op2, vl); + return __riscv_vrsub_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4( @@ -75,7 +75,7 @@ vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf4(op1, op2, vl); + return __riscv_vrsub_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2( @@ -84,7 +84,7 @@ vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf2(op1, op2, vl); + return __riscv_vrsub_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1( @@ 
-93,7 +93,7 @@ vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m1(op1, op2, vl); + return __riscv_vrsub_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2( @@ -102,7 +102,7 @@ vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m2(op1, op2, vl); + return __riscv_vrsub_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4( @@ -111,7 +111,7 @@ vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m4(op1, op2, vl); + return __riscv_vrsub_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8( @@ -120,7 +120,7 @@ vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m8(op1, op2, vl); + return __riscv_vrsub_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2( @@ -129,7 +129,7 @@ vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32mf2(op1, op2, vl); + return __riscv_vrsub_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1( @@ -138,7 +138,7 @@ vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m1(op1, op2, vl); + return __riscv_vrsub_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrsub_vx_i32m2( @@ -147,7 +147,7 @@ vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m2(op1, op2, vl); + return __riscv_vrsub_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4( @@ -156,7 +156,7 @@ vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m4(op1, op2, vl); + return __riscv_vrsub_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8( @@ -165,7 +165,7 @@ vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m8(op1, op2, vl); + return __riscv_vrsub_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1( @@ -174,7 +174,7 @@ vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m1(op1, op2, vl); + return __riscv_vrsub_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2( @@ -183,7 +183,7 @@ vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m2(op1, op2, vl); + return __riscv_vrsub_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4( @@ -192,7 +192,7 @@ vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m4(op1, op2, vl); + return __riscv_vrsub_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrsub_vx_i64m8( @@ -201,7 +201,7 @@ vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m8(op1, op2, vl); + return __riscv_vrsub_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8( @@ -210,7 +210,7 @@ vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf8(op1, op2, vl); + return __riscv_vrsub_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4( @@ -219,7 +219,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf4(op1, op2, vl); + return __riscv_vrsub_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2( @@ -228,7 +228,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf2(op1, op2, vl); + return __riscv_vrsub_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1( @@ -237,7 +237,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m1(op1, op2, vl); + return __riscv_vrsub_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2( @@ -246,7 +246,7 @@ vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m2(op1, op2, vl); + return __riscv_vrsub_vx_u8m2(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vrsub_vx_u8m4( @@ -255,7 +255,7 @@ vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m4(op1, op2, vl); + return __riscv_vrsub_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8( @@ -264,7 +264,7 @@ vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m8(op1, op2, vl); + return __riscv_vrsub_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4( @@ -273,7 +273,7 @@ vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16mf4(op1, op2, vl); + return __riscv_vrsub_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2( @@ -282,7 +282,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16mf2(op1, op2, vl); + return __riscv_vrsub_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1( @@ -291,7 +291,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m1(op1, op2, vl); + return __riscv_vrsub_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2( @@ -300,7 +300,7 @@ vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m2(op1, op2, vl); + return 
__riscv_vrsub_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4( @@ -309,7 +309,7 @@ vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m4(op1, op2, vl); + return __riscv_vrsub_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8( @@ -318,7 +318,7 @@ vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m8(op1, op2, vl); + return __riscv_vrsub_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2( @@ -327,7 +327,7 @@ vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32mf2(op1, op2, vl); + return __riscv_vrsub_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1( @@ -336,7 +336,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m1(op1, op2, vl); + return __riscv_vrsub_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2( @@ -345,7 +345,7 @@ vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m2(op1, op2, vl); + return __riscv_vrsub_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4( @@ -354,7 +354,7 @@ vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - 
return vrsub_vx_u32m4(op1, op2, vl); + return __riscv_vrsub_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8( @@ -363,7 +363,7 @@ vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m8(op1, op2, vl); + return __riscv_vrsub_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1( @@ -372,7 +372,7 @@ vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m1(op1, op2, vl); + return __riscv_vrsub_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2( @@ -381,7 +381,7 @@ vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m2(op1, op2, vl); + return __riscv_vrsub_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4( @@ -390,7 +390,7 @@ vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m4(op1, op2, vl); + return __riscv_vrsub_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m8(op1, op2, vl); + return __riscv_vrsub_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m( @@ -426,7 +426,7 @@ vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m( @@ -435,7 +435,7 @@ vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m( @@ -444,7 +444,7 @@ vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m( @@ -453,7 +453,7 @@ vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m4_m(mask, op1, op2, vl); + return 
__riscv_vrsub_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m( @@ -462,7 +462,7 @@ vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m( @@ -471,7 +471,7 @@ vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m( @@ -480,7 +480,7 @@ vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m( @@ -489,7 +489,7 @@ vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m( @@ -498,7 +498,7 @@ vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m( @@ -507,7 +507,7 @@ 
vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m( @@ -516,7 +516,7 @@ vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m( @@ -525,7 +525,7 @@ vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m( @@ -534,7 +534,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m( @@ -543,7 +543,7 @@ vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m( @@ -552,7 +552,7 @@ vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m( @@ -561,7 +561,7 @@ vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m( @@ -570,7 +570,7 @@ vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m( @@ -579,7 +579,7 @@ vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m( @@ -588,7 +588,7 @@ vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m( @@ -597,7 +597,7 @@ vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return 
vrsub_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m( @@ -606,7 +606,7 @@ vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m( @@ -615,7 +615,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m( @@ -624,7 +624,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m( @@ -633,7 +633,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m( @@ -642,7 +642,7 @@ vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrsub_vx_u8m4_m( @@ -651,7 +651,7 @@ vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m( @@ -660,7 +660,7 @@ vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m( @@ -669,7 +669,7 @@ vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m( @@ -678,7 +678,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m( @@ -687,7 +687,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m( @@ -696,7 +696,7 @@ vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, 
vuint16m1_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m( @@ -705,7 +705,7 @@ vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m( @@ -714,7 +714,7 @@ vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m( @@ -723,7 +723,7 @@ vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m( @@ -732,7 +732,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m( @@ -741,7 +741,7 @@ vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m( @@ -750,7 +750,7 @@ vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m( @@ -759,7 +759,7 @@ vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m( @@ -768,7 +768,7 @@ vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m( @@ -777,7 +777,7 @@ vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m( @@ -786,7 +786,7 @@ vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return 
vrsub_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vrsub_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsadd.c index 616c91f4c045..9c34e4d9e553 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsadd.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsadd_vv_i8mf8(op1, op2, vl); + return __riscv_vsadd_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf8(op1, op2, vl); + return __riscv_vsadd_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsadd_vv_i8mf4(op1, op2, vl); + return __riscv_vsadd_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, 
int8_t op2, size_t vl) { - return vsadd_vx_i8mf4(op1, op2, vl); + return __riscv_vsadd_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsadd_vv_i8mf2(op1, op2, vl); + return __riscv_vsadd_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf2(op1, op2, vl); + return __riscv_vsadd_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsadd_vv_i8m1(op1, op2, vl); + return __riscv_vsadd_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m1(op1, op2, vl); + return __riscv_vsadd_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsadd_vv_i8m2(op1, op2, vl); + return __riscv_vsadd_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return 
vsadd_vx_i8m2(op1, op2, vl); + return __riscv_vsadd_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsadd_vv_i8m4(op1, op2, vl); + return __riscv_vsadd_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m4(op1, op2, vl); + return __riscv_vsadd_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsadd_vv_i8m8(op1, op2, vl); + return __riscv_vsadd_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m8(op1, op2, vl); + return __riscv_vsadd_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsadd_vv_i16mf4(op1, op2, vl); + return __riscv_vsadd_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return 
vsadd_vx_i16mf4(op1, op2, vl); + return __riscv_vsadd_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsadd_vv_i16mf2(op1, op2, vl); + return __riscv_vsadd_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16mf2(op1, op2, vl); + return __riscv_vsadd_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsadd_vv_i16m1(op1, op2, vl); + return __riscv_vsadd_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m1(op1, op2, vl); + return __riscv_vsadd_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsadd_vv_i16m2(op1, op2, vl); + return __riscv_vsadd_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m2(op1, op2, vl); + return __riscv_vsadd_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsadd_vv_i16m4(op1, op2, vl); + return __riscv_vsadd_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m4(op1, op2, vl); + return __riscv_vsadd_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsadd_vv_i16m8(op1, op2, vl); + return __riscv_vsadd_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m8(op1, op2, vl); + return __riscv_vsadd_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsadd_vv_i32mf2(op1, op2, vl); + return __riscv_vsadd_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32mf2(op1, op2, vl); + return __riscv_vsadd_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsadd_vv_i32m1(op1, op2, vl); + return __riscv_vsadd_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m1(op1, op2, vl); + return __riscv_vsadd_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsadd_vv_i32m2(op1, op2, vl); + return __riscv_vsadd_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m2(op1, op2, vl); + return __riscv_vsadd_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsadd_vv_i32m4(op1, op2, vl); + return __riscv_vsadd_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, 
vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m4(op1, op2, vl); + return __riscv_vsadd_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsadd_vv_i32m8(op1, op2, vl); + return __riscv_vsadd_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m8(op1, op2, vl); + return __riscv_vsadd_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsadd_vv_i64m1(op1, op2, vl); + return __riscv_vsadd_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m1(op1, op2, vl); + return __riscv_vsadd_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsadd_vv_i64m2(op1, op2, vl); + return __riscv_vsadd_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t 
test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m2(op1, op2, vl); + return __riscv_vsadd_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsadd_vv_i64m4(op1, op2, vl); + return __riscv_vsadd_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m4(op1, op2, vl); + return __riscv_vsadd_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsadd_vv_i64m8(op1, op2, vl); + return __riscv_vsadd_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m8(op1, op2, vl); + return __riscv_vsadd_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m( @@ -408,7 +408,7 @@ vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsadd_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsadd_vx_i8mf8_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m( @@ -426,7 +426,7 @@ vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsadd_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m( @@ -435,7 +435,7 @@ vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m( @@ -444,7 +444,7 @@ vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsadd_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m( @@ -453,7 +453,7 @@ vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m( @@ -462,7 +462,7 @@ vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, 
int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsadd_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m( @@ -480,7 +480,7 @@ vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsadd_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m( @@ -489,7 +489,7 @@ vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m( @@ -498,7 +498,7 @@ vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsadd_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m( @@ -507,7 +507,7 @@ vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return 
vsadd_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m( @@ -516,7 +516,7 @@ vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsadd_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m( @@ -525,7 +525,7 @@ vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m( @@ -534,7 +534,7 @@ vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsadd_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m( @@ -543,7 +543,7 @@ vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m( @@ -552,7 +552,7 @@ vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsadd_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i16mf2_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m( @@ -561,7 +561,7 @@ vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m( @@ -570,7 +570,7 @@ vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsadd_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m( @@ -579,7 +579,7 @@ vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m( @@ -588,7 +588,7 @@ vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsadd_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m( @@ -597,7 +597,7 @@ vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m( @@ -606,7 +606,7 @@ vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t 
mask, vint16m2_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsadd_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m( @@ -615,7 +615,7 @@ vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m( @@ -624,7 +624,7 @@ vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsadd_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m( @@ -633,7 +633,7 @@ vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m( @@ -642,7 +642,7 @@ vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsadd_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m( @@ -651,7 +651,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m( @@ -660,7 +660,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsadd_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m( @@ -669,7 +669,7 @@ vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m( @@ -678,7 +678,7 @@ vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsadd_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m( @@ -687,7 +687,7 @@ vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m( @@ -696,7 +696,7 @@ vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return 
vsadd_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m( @@ -705,7 +705,7 @@ vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m( @@ -714,7 +714,7 @@ vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsadd_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m( @@ -723,7 +723,7 @@ vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m( @@ -732,7 +732,7 @@ vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsadd_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m( @@ -741,7 +741,7 @@ vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i64m1_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m( @@ -750,7 +750,7 @@ vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsadd_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m( @@ -759,7 +759,7 @@ vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m( @@ -768,7 +768,7 @@ vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsadd_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m( @@ -777,7 +777,7 @@ vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m( @@ -786,7 +786,7 @@ vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsadd_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m( @@ -795,6 +795,6 @@ vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t 
mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vsadd_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsaddu.c index 5e548d5ca7d0..f16db7d62e5e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsaddu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsaddu_vv_u8mf8(op1, op2, vl); + return __riscv_vsaddu_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf8(op1, op2, vl); + return __riscv_vsaddu_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsaddu_vv_u8mf4(op1, op2, vl); + return __riscv_vsaddu_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4( @@ -39,7 +39,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf4(op1, op2, vl); + return __riscv_vsaddu_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2( @@ -48,7 
+48,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsaddu_vv_u8mf2(op1, op2, vl); + return __riscv_vsaddu_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf2(op1, op2, vl); + return __riscv_vsaddu_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsaddu_vv_u8m1(op1, op2, vl); + return __riscv_vsaddu_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m1(op1, op2, vl); + return __riscv_vsaddu_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2( @@ -84,7 +84,7 @@ vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsaddu_vv_u8m2(op1, op2, vl); + return __riscv_vsaddu_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m2(op1, op2, vl); + return __riscv_vsaddu_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsaddu_vv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsaddu_vv_u8m4(op1, op2, vl); + return __riscv_vsaddu_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4( @@ -111,7 +111,7 @@ vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m4(op1, op2, vl); + return __riscv_vsaddu_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8( @@ -120,7 +120,7 @@ vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsaddu_vv_u8m8(op1, op2, vl); + return __riscv_vsaddu_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8( @@ -129,7 +129,7 @@ vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m8(op1, op2, vl); + return __riscv_vsaddu_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4( @@ -138,7 +138,7 @@ vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsaddu_vv_u16mf4(op1, op2, vl); + return __riscv_vsaddu_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4( @@ -147,7 +147,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16mf4(op1, op2, vl); + return 
__riscv_vsaddu_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2( @@ -156,7 +156,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsaddu_vv_u16mf2(op1, op2, vl); + return __riscv_vsaddu_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2( @@ -165,7 +165,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16mf2(op1, op2, vl); + return __riscv_vsaddu_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1( @@ -174,7 +174,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsaddu_vv_u16m1(op1, op2, vl); + return __riscv_vsaddu_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1( @@ -183,7 +183,7 @@ vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m1(op1, op2, vl); + return __riscv_vsaddu_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2( @@ -192,7 +192,7 @@ vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsaddu_vv_u16m2(op1, op2, vl); + return __riscv_vsaddu_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2( @@ -201,7 +201,7 @@ vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m2(op1, op2, vl); + return __riscv_vsaddu_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4( @@ -210,7 +210,7 @@ vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsaddu_vv_u16m4(op1, op2, vl); + return __riscv_vsaddu_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4( @@ -219,7 +219,7 @@ vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m4(op1, op2, vl); + return __riscv_vsaddu_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8( @@ -228,7 +228,7 @@ vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsaddu_vv_u16m8(op1, op2, vl); + return __riscv_vsaddu_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8( @@ -237,7 +237,7 @@ vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m8(op1, op2, vl); + return __riscv_vsaddu_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2( @@ -246,7 +246,7 @@ vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsaddu_vv_u32mf2(op1, op2, vl); + return __riscv_vsaddu_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t 
test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32mf2(op1, op2, vl); + return __riscv_vsaddu_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsaddu_vv_u32m1(op1, op2, vl); + return __riscv_vsaddu_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m1(op1, op2, vl); + return __riscv_vsaddu_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2( @@ -282,7 +282,7 @@ vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsaddu_vv_u32m2(op1, op2, vl); + return __riscv_vsaddu_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2( @@ -291,7 +291,7 @@ vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m2(op1, op2, vl); + return __riscv_vsaddu_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4( @@ -300,7 +300,7 @@ vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsaddu_vv_u32m4(op1, op2, vl); + return 
__riscv_vsaddu_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4( @@ -309,7 +309,7 @@ vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m4(op1, op2, vl); + return __riscv_vsaddu_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8( @@ -318,7 +318,7 @@ vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsaddu_vv_u32m8(op1, op2, vl); + return __riscv_vsaddu_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8( @@ -327,7 +327,7 @@ vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m8(op1, op2, vl); + return __riscv_vsaddu_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1( @@ -336,7 +336,7 @@ vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsaddu_vv_u64m1(op1, op2, vl); + return __riscv_vsaddu_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1( @@ -345,7 +345,7 @@ vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m1(op1, op2, vl); + return __riscv_vsaddu_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2( @@ -354,7 +354,7 @@ vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t 
op1, vuint64m2_t op2, size_t vl) { - return vsaddu_vv_u64m2(op1, op2, vl); + return __riscv_vsaddu_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2( @@ -363,7 +363,7 @@ vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m2(op1, op2, vl); + return __riscv_vsaddu_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4( @@ -372,7 +372,7 @@ vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsaddu_vv_u64m4(op1, op2, vl); + return __riscv_vsaddu_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4( @@ -381,7 +381,7 @@ vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m4(op1, op2, vl); + return __riscv_vsaddu_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8( @@ -390,7 +390,7 @@ vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsaddu_vv_u64m8(op1, op2, vl); + return __riscv_vsaddu_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m8(op1, op2, vl); + return __riscv_vsaddu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t 
vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsaddu_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m( @@ -417,7 +417,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m( @@ -426,7 +426,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsaddu_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m( @@ -435,7 +435,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m( @@ -444,7 +444,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsaddu_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m( @@ -453,7 +453,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m( @@ -462,7 +462,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsaddu_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m( @@ -471,7 +471,7 @@ vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m( @@ -480,7 +480,7 @@ vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsaddu_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m( @@ -489,7 +489,7 @@ vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m( @@ -498,7 +498,7 @@ vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return 
vsaddu_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m( @@ -507,7 +507,7 @@ vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m( @@ -516,7 +516,7 @@ vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsaddu_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m( @@ -525,7 +525,7 @@ vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m( @@ -534,7 +534,7 @@ vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsaddu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m( @@ -543,7 +543,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf4_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m( @@ -552,7 +552,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsaddu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m( @@ -561,7 +561,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m( @@ -570,7 +570,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsaddu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m( @@ -579,7 +579,7 @@ vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m( @@ -588,7 +588,7 @@ vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsaddu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m( @@ 
-597,7 +597,7 @@ vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m( @@ -606,7 +606,7 @@ vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsaddu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m( @@ -615,7 +615,7 @@ vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m( @@ -624,7 +624,7 @@ vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsaddu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m( @@ -633,7 +633,7 @@ vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m( @@ -642,7 +642,7 @@ vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, 
vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsaddu_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m( @@ -651,7 +651,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m( @@ -660,7 +660,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsaddu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m( @@ -669,7 +669,7 @@ vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m( @@ -678,7 +678,7 @@ vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsaddu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m( @@ -687,7 +687,7 @@ vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m( @@ -696,7 +696,7 @@ vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsaddu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m( @@ -705,7 +705,7 @@ vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m( @@ -714,7 +714,7 @@ vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsaddu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m( @@ -723,7 +723,7 @@ vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m( @@ -732,7 +732,7 @@ vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t 
op1, vuint64m1_t op2, size_t vl) { - return vsaddu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m( @@ -741,7 +741,7 @@ vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m( @@ -750,7 +750,7 @@ vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsaddu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m( @@ -759,7 +759,7 @@ vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m( @@ -768,7 +768,7 @@ vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsaddu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m( @@ -777,7 +777,7 @@ vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m4_m(mask, op1, 
op2, vl); + return __riscv_vsaddu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m( @@ -786,7 +786,7 @@ vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsaddu_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vsaddu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsbc.c index 36321d178f5b..bd94a109e057 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsbc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsbc.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_i8mf8(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i8mf8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_i8mf8(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8mf8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, 
int8_t op2, vbool64_t borrowin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_i8mf4(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i8mf4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_i8mf4(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8mf4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_i8mf2(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i8mf2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_i8mf2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8mf2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_i8m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i8m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_i8m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vvm_i8m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i8m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vxm_i8m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vvm_i8m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i8m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vxm_i8m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl) { - return vsbc_vvm_i8m8(op1, op2, borrowin, vl); + 
return __riscv_vsbc_vvm_i8m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl) { - return vsbc_vxm_i8m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_i16mf4(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i16mf4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t bor // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_i16mf4(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i16mf4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_i16mf2(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i16mf2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t bor // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_i16mf2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i16mf2(op1, op2, borrowin, vl); } // 
CHECK-RV64-LABEL: @test_vsbc_vvm_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_i16m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i16m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_i16m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i16m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_i16m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i16m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_i16m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i16m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vvm_i16m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i16m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m4( @@ -219,7 +219,7 @@ vint16m4_t 
test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vxm_i16m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i16m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vvm_i16m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i16m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vxm_i16m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i16m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_i32mf2(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i32mf2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t bor // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_i32mf2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i32mf2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t borrowi // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_i32m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i32m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_i32m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i32m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_i32m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i32m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_i32m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i32m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_i32m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i32m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t 
op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_i32m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i32m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vvm_i32m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i32m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vxm_i32m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i32m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_i64m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i64m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_i64m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i64m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_i64m2(op1, op2, borrowin, vl); + 
return __riscv_vsbc_vvm_i64m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_i64m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i64m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_i64m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i64m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_i64m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i64m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_i64m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i64m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_i64m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i64m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: 
@test_vsbc_vvm_u8mf8( @@ -408,7 +408,7 @@ vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_u8mf8(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8mf8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf8( @@ -417,7 +417,7 @@ vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_u8mf8(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u8mf8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf4( @@ -426,7 +426,7 @@ vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_u8mf4(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8mf4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf4( @@ -435,7 +435,7 @@ vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_u8mf4(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u8mf4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf2( @@ -444,7 +444,7 @@ vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_u8mf2(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8mf2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf2( @@ -453,7 +453,7 @@ vuint8mf2_t 
test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_u8mf2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u8mf2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m1( @@ -462,7 +462,7 @@ vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_u8m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m1( @@ -471,7 +471,7 @@ vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_u8m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u8m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m2( @@ -480,7 +480,7 @@ vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vvm_u8m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m2( @@ -489,7 +489,7 @@ vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vxm_u8m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u8m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m4( @@ -498,7 +498,7 @@ vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, si // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vvm_u8m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m4( @@ -507,7 +507,7 @@ vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vxm_u8m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u8m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m8( @@ -516,7 +516,7 @@ vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl) { - return vsbc_vvm_u8m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m8( @@ -525,7 +525,7 @@ vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl) { - return vsbc_vxm_u8m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u8m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf4( @@ -534,7 +534,7 @@ vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_u16mf4(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u16mf4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf4( @@ -543,7 +543,7 @@ vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t 
vl) { - return vsbc_vxm_u16mf4(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u16mf4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf2( @@ -552,7 +552,7 @@ vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_u16mf2(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u16mf2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf2( @@ -561,7 +561,7 @@ vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_u16mf2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u16mf2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m1( @@ -570,7 +570,7 @@ vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_u16m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u16m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m1( @@ -579,7 +579,7 @@ vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_u16m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u16m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m2( @@ -588,7 +588,7 @@ vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_u16m2(op1, op2, borrowin, vl); + 
return __riscv_vsbc_vvm_u16m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m2( @@ -597,7 +597,7 @@ vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t borro // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_u16m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u16m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m4( @@ -606,7 +606,7 @@ vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t borrowin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vvm_u16m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u16m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m4( @@ -615,7 +615,7 @@ vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t borro // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vxm_u16m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u16m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m8( @@ -624,7 +624,7 @@ vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t borrowin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vvm_u16m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u16m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m8( @@ -633,7 +633,7 @@ vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t borro // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vxm_u16m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u16m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: 
@test_vsbc_vvm_u32mf2( @@ -642,7 +642,7 @@ vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t borrowin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_u32mf2(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u32mf2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2( @@ -651,7 +651,7 @@ vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_u32mf2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u32mf2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m1( @@ -660,7 +660,7 @@ vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_u32m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u32m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m1( @@ -669,7 +669,7 @@ vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_u32m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u32m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m2( @@ -678,7 +678,7 @@ vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_u32m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u32m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m2( @@ -687,7 +687,7 @@ vuint32m2_t 
test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_u32m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u32m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m4( @@ -696,7 +696,7 @@ vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_u32m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u32m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m4( @@ -705,7 +705,7 @@ vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t borro // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_u32m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u32m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m8( @@ -714,7 +714,7 @@ vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t borrowin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vvm_u32m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u32m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m8( @@ -723,7 +723,7 @@ vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t borro // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vxm_u32m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u32m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m1( @@ -732,7 +732,7 @@ vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t borrowin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_u64m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u64m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m1( @@ -741,7 +741,7 @@ vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_u64m1(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u64m1(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m2( @@ -750,7 +750,7 @@ vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_u64m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u64m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m2( @@ -759,7 +759,7 @@ vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_u64m2(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u64m2(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m4( @@ -768,7 +768,7 @@ vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_u64m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u64m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m4( @@ -777,7 +777,7 @@ vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t borr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_u64m4(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u64m4(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m8( @@ -786,7 +786,7 @@ vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t borrowi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_u64m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u64m8(op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m8( @@ -795,6 +795,6 @@ vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t borro // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsbc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_u64m8(op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u64m8(op1, op2, borrowin, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse16.c index 6b299f829a6b..502e44037f4d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vse16_v_f16mf4(_Float16 *base, vfloat16mf4_t value, size_t vl) { - return vse16_v_f16mf4(base, value, vl); + return __riscv_vse16_v_f16mf4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vse16_v_f16mf4(_Float16 *base, vfloat16mf4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_f16mf2(_Float16 *base, vfloat16mf2_t value, size_t vl) { - return vse16_v_f16mf2(base, value, vl); + return __riscv_vse16_v_f16mf2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_f16m1( @@ -31,7 +31,7 @@ 
void test_vse16_v_f16mf2(_Float16 *base, vfloat16mf2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_f16m1(_Float16 *base, vfloat16m1_t value, size_t vl) { - return vse16_v_f16m1(base, value, vl); + return __riscv_vse16_v_f16m1(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_f16m2( @@ -40,7 +40,7 @@ void test_vse16_v_f16m1(_Float16 *base, vfloat16m1_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_f16m2(_Float16 *base, vfloat16m2_t value, size_t vl) { - return vse16_v_f16m2(base, value, vl); + return __riscv_vse16_v_f16m2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_f16m4( @@ -49,7 +49,7 @@ void test_vse16_v_f16m2(_Float16 *base, vfloat16m2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_f16m4(_Float16 *base, vfloat16m4_t value, size_t vl) { - return vse16_v_f16m4(base, value, vl); + return __riscv_vse16_v_f16m4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_f16m8( @@ -58,7 +58,7 @@ void test_vse16_v_f16m4(_Float16 *base, vfloat16m4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_f16m8(_Float16 *base, vfloat16m8_t value, size_t vl) { - return vse16_v_f16m8(base, value, vl); + return __riscv_vse16_v_f16m8(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_i16mf4( @@ -67,7 +67,7 @@ void test_vse16_v_f16m8(_Float16 *base, vfloat16m8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) { - return vse16_v_i16mf4(base, value, vl); + return __riscv_vse16_v_i16mf4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_i16mf2( @@ -76,7 +76,7 @@ void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) { - return vse16_v_i16mf2(base, value, vl); + return __riscv_vse16_v_i16mf2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_i16m1( @@ -85,7 +85,7 @@ void 
test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) { - return vse16_v_i16m1(base, value, vl); + return __riscv_vse16_v_i16m1(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_i16m2( @@ -94,7 +94,7 @@ void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) { - return vse16_v_i16m2(base, value, vl); + return __riscv_vse16_v_i16m2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_i16m4( @@ -103,7 +103,7 @@ void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) { - return vse16_v_i16m4(base, value, vl); + return __riscv_vse16_v_i16m4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_i16m8( @@ -112,7 +112,7 @@ void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) { - return vse16_v_i16m8(base, value, vl); + return __riscv_vse16_v_i16m8(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_u16mf4( @@ -121,7 +121,7 @@ void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) { - return vse16_v_u16mf4(base, value, vl); + return __riscv_vse16_v_u16mf4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_u16mf2( @@ -130,7 +130,7 @@ void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) { - return vse16_v_u16mf2(base, value, vl); + return __riscv_vse16_v_u16mf2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_u16m1( @@ -139,7 +139,7 @@ void 
test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) { - return vse16_v_u16m1(base, value, vl); + return __riscv_vse16_v_u16m1(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_u16m2( @@ -148,7 +148,7 @@ void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) { - return vse16_v_u16m2(base, value, vl); + return __riscv_vse16_v_u16m2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_u16m4( @@ -157,7 +157,7 @@ void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) { - return vse16_v_u16m4(base, value, vl); + return __riscv_vse16_v_u16m4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_u16m8( @@ -166,7 +166,7 @@ void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) { - return vse16_v_u16m8(base, value, vl); + return __riscv_vse16_v_u16m8(base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_f16mf4_m( @@ -175,7 +175,7 @@ void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t value, size_t vl) { - return vse16_v_f16mf4_m(mask, base, value, vl); + return __riscv_vse16_v_f16mf4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_f16mf2_m( @@ -184,7 +184,7 @@ void test_vse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t value, // CHECK-RV64-NEXT: ret void // void test_vse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t value, size_t vl) { - return vse16_v_f16mf2_m(mask, base, value, vl); + return __riscv_vse16_v_f16mf2_m(mask, base, value, 
vl); } // CHECK-RV64-LABEL: @test_vse16_v_f16m1_m( @@ -193,7 +193,7 @@ void test_vse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vse16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t value, size_t vl) { - return vse16_v_f16m1_m(mask, base, value, vl); + return __riscv_vse16_v_f16m1_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_f16m2_m( @@ -202,7 +202,7 @@ void test_vse16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t value, si // CHECK-RV64-NEXT: ret void // void test_vse16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t value, size_t vl) { - return vse16_v_f16m2_m(mask, base, value, vl); + return __riscv_vse16_v_f16m2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_f16m4_m( @@ -211,7 +211,7 @@ void test_vse16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vse16_v_f16m4_m(vbool4_t mask, _Float16 *base, vfloat16m4_t value, size_t vl) { - return vse16_v_f16m4_m(mask, base, value, vl); + return __riscv_vse16_v_f16m4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_f16m8_m( @@ -220,7 +220,7 @@ void test_vse16_v_f16m4_m(vbool4_t mask, _Float16 *base, vfloat16m4_t value, siz // CHECK-RV64-NEXT: ret void // void test_vse16_v_f16m8_m(vbool2_t mask, _Float16 *base, vfloat16m8_t value, size_t vl) { - return vse16_v_f16m8_m(mask, base, value, vl); + return __riscv_vse16_v_f16m8_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_i16mf4_m( @@ -229,7 +229,7 @@ void test_vse16_v_f16m8_m(vbool2_t mask, _Float16 *base, vfloat16m8_t value, siz // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) { - return vse16_v_i16mf4_m(mask, base, value, vl); + return __riscv_vse16_v_i16mf4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_i16mf2_m( @@ -238,7 +238,7 @@ void test_vse16_v_i16mf4_m(vbool64_t mask, 
int16_t *base, vint16mf4_t value, siz // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) { - return vse16_v_i16mf2_m(mask, base, value, vl); + return __riscv_vse16_v_i16mf2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_i16m1_m( @@ -247,7 +247,7 @@ void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) { - return vse16_v_i16m1_m(mask, base, value, vl); + return __riscv_vse16_v_i16m1_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_i16m2_m( @@ -256,7 +256,7 @@ void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) { - return vse16_v_i16m2_m(mask, base, value, vl); + return __riscv_vse16_v_i16m2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_i16m4_m( @@ -265,7 +265,7 @@ void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) { - return vse16_v_i16m4_m(mask, base, value, vl); + return __riscv_vse16_v_i16m4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_i16m8_m( @@ -274,7 +274,7 @@ void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) { - return vse16_v_i16m8_m(mask, base, value, vl); + return __riscv_vse16_v_i16m8_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_u16mf4_m( @@ -283,7 +283,7 @@ void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, 
vuint16mf4_t value, size_t vl) { - return vse16_v_u16mf4_m(mask, base, value, vl); + return __riscv_vse16_v_u16mf4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_u16mf2_m( @@ -292,7 +292,7 @@ void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, s // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) { - return vse16_v_u16mf2_m(mask, base, value, vl); + return __riscv_vse16_v_u16mf2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_u16m1_m( @@ -301,7 +301,7 @@ void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, s // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) { - return vse16_v_u16m1_m(mask, base, value, vl); + return __riscv_vse16_v_u16m1_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_u16m2_m( @@ -310,7 +310,7 @@ void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) { - return vse16_v_u16m2_m(mask, base, value, vl); + return __riscv_vse16_v_u16m2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_u16m4_m( @@ -319,7 +319,7 @@ void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) { - return vse16_v_u16m4_m(mask, base, value, vl); + return __riscv_vse16_v_u16m4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse16_v_u16m8_m( @@ -328,6 +328,6 @@ void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) { - return vse16_v_u16m8_m(mask, base, value, vl); + return __riscv_vse16_v_u16m8_m(mask, 
base, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse32.c index 1af53830a96c..2f25b4da5c15 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32mf2(float *base, vfloat32mf2_t value, size_t vl) { - return vse32_v_f32mf2(base, value, vl); + return __riscv_vse32_v_f32mf2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_f32m1( @@ -22,7 +22,7 @@ void test_vse32_v_f32mf2(float *base, vfloat32mf2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m1(float *base, vfloat32m1_t value, size_t vl) { - return vse32_v_f32m1(base, value, vl); + return __riscv_vse32_v_f32m1(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_f32m2( @@ -31,7 +31,7 @@ void test_vse32_v_f32m1(float *base, vfloat32m1_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m2(float *base, vfloat32m2_t value, size_t vl) { - return vse32_v_f32m2(base, value, vl); + return __riscv_vse32_v_f32m2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_f32m4( @@ -40,7 +40,7 @@ void test_vse32_v_f32m2(float *base, vfloat32m2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m4(float *base, vfloat32m4_t value, size_t vl) { - return vse32_v_f32m4(base, value, vl); + return __riscv_vse32_v_f32m4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_f32m8( @@ -49,7 +49,7 @@ void test_vse32_v_f32m4(float *base, vfloat32m4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m8(float *base, vfloat32m8_t value, size_t vl) { - return vse32_v_f32m8(base, value, vl); + return __riscv_vse32_v_f32m8(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_i32mf2( @@ 
-58,7 +58,7 @@ void test_vse32_v_f32m8(float *base, vfloat32m8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) { - return vse32_v_i32mf2(base, value, vl); + return __riscv_vse32_v_i32mf2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_i32m1( @@ -67,7 +67,7 @@ void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) { - return vse32_v_i32m1(base, value, vl); + return __riscv_vse32_v_i32m1(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_i32m2( @@ -76,7 +76,7 @@ void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) { - return vse32_v_i32m2(base, value, vl); + return __riscv_vse32_v_i32m2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_i32m4( @@ -85,7 +85,7 @@ void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) { - return vse32_v_i32m4(base, value, vl); + return __riscv_vse32_v_i32m4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_i32m8( @@ -94,7 +94,7 @@ void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) { - return vse32_v_i32m8(base, value, vl); + return __riscv_vse32_v_i32m8(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_u32mf2( @@ -103,7 +103,7 @@ void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) { - return vse32_v_u32mf2(base, value, vl); + return __riscv_vse32_v_u32mf2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_u32m1( @@ -112,7 +112,7 @@ void 
test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) { - return vse32_v_u32m1(base, value, vl); + return __riscv_vse32_v_u32m1(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_u32m2( @@ -121,7 +121,7 @@ void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) { - return vse32_v_u32m2(base, value, vl); + return __riscv_vse32_v_u32m2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_u32m4( @@ -130,7 +130,7 @@ void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) { - return vse32_v_u32m4(base, value, vl); + return __riscv_vse32_v_u32m4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_u32m8( @@ -139,7 +139,7 @@ void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) { - return vse32_v_u32m8(base, value, vl); + return __riscv_vse32_v_u32m8(base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_f32mf2_m( @@ -148,7 +148,7 @@ void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t value, size_t vl) { - return vse32_v_f32mf2_m(mask, base, value, vl); + return __riscv_vse32_v_f32mf2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_f32m1_m( @@ -157,7 +157,7 @@ void test_vse32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t value, size_t vl) { - return vse32_v_f32m1_m(mask, base, value, vl); + return __riscv_vse32_v_f32m1_m(mask, base, value, vl); } // 
CHECK-RV64-LABEL: @test_vse32_v_f32m2_m( @@ -166,7 +166,7 @@ void test_vse32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t value, size_t vl) { - return vse32_v_f32m2_m(mask, base, value, vl); + return __riscv_vse32_v_f32m2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_f32m4_m( @@ -175,7 +175,7 @@ void test_vse32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t value, size_t vl) { - return vse32_v_f32m4_m(mask, base, value, vl); + return __riscv_vse32_v_f32m4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_f32m8_m( @@ -184,7 +184,7 @@ void test_vse32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m8_m(vbool4_t mask, float *base, vfloat32m8_t value, size_t vl) { - return vse32_v_f32m8_m(mask, base, value, vl); + return __riscv_vse32_v_f32m8_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_i32mf2_m( @@ -193,7 +193,7 @@ void test_vse32_v_f32m8_m(vbool4_t mask, float *base, vfloat32m8_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) { - return vse32_v_i32mf2_m(mask, base, value, vl); + return __riscv_vse32_v_i32mf2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_i32m1_m( @@ -202,7 +202,7 @@ void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) { - return vse32_v_i32m1_m(mask, base, value, vl); + return __riscv_vse32_v_i32m1_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_i32m2_m( @@ -211,7 +211,7 @@ void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t 
value, size_ // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) { - return vse32_v_i32m2_m(mask, base, value, vl); + return __riscv_vse32_v_i32m2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_i32m4_m( @@ -220,7 +220,7 @@ void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) { - return vse32_v_i32m4_m(mask, base, value, vl); + return __riscv_vse32_v_i32m4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_i32m8_m( @@ -229,7 +229,7 @@ void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) { - return vse32_v_i32m8_m(mask, base, value, vl); + return __riscv_vse32_v_i32m8_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_u32mf2_m( @@ -238,7 +238,7 @@ void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) { - return vse32_v_u32mf2_m(mask, base, value, vl); + return __riscv_vse32_v_u32mf2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_u32m1_m( @@ -247,7 +247,7 @@ void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, s // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) { - return vse32_v_u32m1_m(mask, base, value, vl); + return __riscv_vse32_v_u32m1_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_u32m2_m( @@ -256,7 +256,7 @@ void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, 
size_t vl) { - return vse32_v_u32m2_m(mask, base, value, vl); + return __riscv_vse32_v_u32m2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_u32m4_m( @@ -265,7 +265,7 @@ void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) { - return vse32_v_u32m4_m(mask, base, value, vl); + return __riscv_vse32_v_u32m4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse32_v_u32m8_m( @@ -274,6 +274,6 @@ void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) { - return vse32_v_u32m8_m(mask, base, value, vl); + return __riscv_vse32_v_u32m8_m(mask, base, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse64.c index 46901a905600..f84322d6730d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m1(double *base, vfloat64m1_t value, size_t vl) { - return vse64_v_f64m1(base, value, vl); + return __riscv_vse64_v_f64m1(base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_f64m2( @@ -22,7 +22,7 @@ void test_vse64_v_f64m1(double *base, vfloat64m1_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m2(double *base, vfloat64m2_t value, size_t vl) { - return vse64_v_f64m2(base, value, vl); + return __riscv_vse64_v_f64m2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_f64m4( @@ -31,7 +31,7 @@ void test_vse64_v_f64m2(double *base, vfloat64m2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void 
test_vse64_v_f64m4(double *base, vfloat64m4_t value, size_t vl) { - return vse64_v_f64m4(base, value, vl); + return __riscv_vse64_v_f64m4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_f64m8( @@ -40,7 +40,7 @@ void test_vse64_v_f64m4(double *base, vfloat64m4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m8(double *base, vfloat64m8_t value, size_t vl) { - return vse64_v_f64m8(base, value, vl); + return __riscv_vse64_v_f64m8(base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_i64m1( @@ -49,7 +49,7 @@ void test_vse64_v_f64m8(double *base, vfloat64m8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) { - return vse64_v_i64m1(base, value, vl); + return __riscv_vse64_v_i64m1(base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_i64m2( @@ -58,7 +58,7 @@ void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) { - return vse64_v_i64m2(base, value, vl); + return __riscv_vse64_v_i64m2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_i64m4( @@ -67,7 +67,7 @@ void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) { - return vse64_v_i64m4(base, value, vl); + return __riscv_vse64_v_i64m4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_i64m8( @@ -76,7 +76,7 @@ void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) { - return vse64_v_i64m8(base, value, vl); + return __riscv_vse64_v_i64m8(base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_u64m1( @@ -85,7 +85,7 @@ void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t 
value, size_t vl) { - return vse64_v_u64m1(base, value, vl); + return __riscv_vse64_v_u64m1(base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_u64m2( @@ -94,7 +94,7 @@ void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) { - return vse64_v_u64m2(base, value, vl); + return __riscv_vse64_v_u64m2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_u64m4( @@ -103,7 +103,7 @@ void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) { - return vse64_v_u64m4(base, value, vl); + return __riscv_vse64_v_u64m4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_u64m8( @@ -112,7 +112,7 @@ void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) { - return vse64_v_u64m8(base, value, vl); + return __riscv_vse64_v_u64m8(base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_f64m1_m( @@ -121,7 +121,7 @@ void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t value, size_t vl) { - return vse64_v_f64m1_m(mask, base, value, vl); + return __riscv_vse64_v_f64m1_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_f64m2_m( @@ -130,7 +130,7 @@ void test_vse64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t value, size // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t value, size_t vl) { - return vse64_v_f64m2_m(mask, base, value, vl); + return __riscv_vse64_v_f64m2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_f64m4_m( @@ -139,7 +139,7 @@ void test_vse64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t value, size // 
CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size_t vl) { - return vse64_v_f64m4_m(mask, base, value, vl); + return __riscv_vse64_v_f64m4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_f64m8_m( @@ -148,7 +148,7 @@ void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) { - return vse64_v_f64m8_m(mask, base, value, vl); + return __riscv_vse64_v_f64m8_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_i64m1_m( @@ -157,7 +157,7 @@ void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) { - return vse64_v_i64m1_m(mask, base, value, vl); + return __riscv_vse64_v_i64m1_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_i64m2_m( @@ -166,7 +166,7 @@ void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) { - return vse64_v_i64m2_m(mask, base, value, vl); + return __riscv_vse64_v_i64m2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_i64m4_m( @@ -175,7 +175,7 @@ void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) { - return vse64_v_i64m4_m(mask, base, value, vl); + return __riscv_vse64_v_i64m4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_i64m8_m( @@ -184,7 +184,7 @@ void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) { - return 
vse64_v_i64m8_m(mask, base, value, vl); + return __riscv_vse64_v_i64m8_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_u64m1_m( @@ -193,7 +193,7 @@ void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) { - return vse64_v_u64m1_m(mask, base, value, vl); + return __riscv_vse64_v_u64m1_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_u64m2_m( @@ -202,7 +202,7 @@ void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) { - return vse64_v_u64m2_m(mask, base, value, vl); + return __riscv_vse64_v_u64m2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_u64m4_m( @@ -211,7 +211,7 @@ void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) { - return vse64_v_u64m4_m(mask, base, value, vl); + return __riscv_vse64_v_u64m4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse64_v_u64m8_m( @@ -220,6 +220,6 @@ void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, siz // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) { - return vse64_v_u64m8_m(mask, base, value, vl); + return __riscv_vse64_v_u64m8_m(mask, base, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse8.c index c5a1a24e490a..78de75bfb2f2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse8.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vse8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) { - return vse8_v_i8mf8(base, value, vl); + return __riscv_vse8_v_i8mf8(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) { - return vse8_v_i8mf4(base, value, vl); + return __riscv_vse8_v_i8mf4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) { - return vse8_v_i8mf2(base, value, vl); + return __riscv_vse8_v_i8mf2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_i8m1( @@ -39,7 +39,7 @@ void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) { - return vse8_v_i8m1(base, value, vl); + return __riscv_vse8_v_i8m1(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_i8m2( @@ -48,7 +48,7 @@ void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) { - return vse8_v_i8m2(base, value, vl); + return __riscv_vse8_v_i8m2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_i8m4( @@ -57,7 +57,7 @@ void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) { - return vse8_v_i8m4(base, value, vl); + return __riscv_vse8_v_i8m4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_i8m8( @@ -66,7 +66,7 @@ void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) { // 
CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) { - return vse8_v_i8m8(base, value, vl); + return __riscv_vse8_v_i8m8(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8mf8( @@ -75,7 +75,7 @@ void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) { - return vse8_v_u8mf8(base, value, vl); + return __riscv_vse8_v_u8mf8(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8mf4( @@ -84,7 +84,7 @@ void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) { - return vse8_v_u8mf4(base, value, vl); + return __riscv_vse8_v_u8mf4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8mf2( @@ -93,7 +93,7 @@ void test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) { - return vse8_v_u8mf2(base, value, vl); + return __riscv_vse8_v_u8mf2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8m1( @@ -102,7 +102,7 @@ void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) { - return vse8_v_u8m1(base, value, vl); + return __riscv_vse8_v_u8m1(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8m2( @@ -111,7 +111,7 @@ void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) { - return vse8_v_u8m2(base, value, vl); + return __riscv_vse8_v_u8m2(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8m4( @@ -120,7 +120,7 @@ void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t 
vl) { - return vse8_v_u8m4(base, value, vl); + return __riscv_vse8_v_u8m4(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8m8( @@ -129,7 +129,7 @@ void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) { - return vse8_v_u8m8(base, value, vl); + return __riscv_vse8_v_u8m8(base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_i8mf8_m( @@ -138,7 +138,7 @@ void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) { - return vse8_v_i8mf8_m(mask, base, value, vl); + return __riscv_vse8_v_i8mf8_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_i8mf4_m( @@ -147,7 +147,7 @@ void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) { - return vse8_v_i8mf4_m(mask, base, value, vl); + return __riscv_vse8_v_i8mf4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_i8mf2_m( @@ -156,7 +156,7 @@ void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) { - return vse8_v_i8mf2_m(mask, base, value, vl); + return __riscv_vse8_v_i8mf2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_i8m1_m( @@ -165,7 +165,7 @@ void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) { - return vse8_v_i8m1_m(mask, base, value, vl); + return __riscv_vse8_v_i8m1_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_i8m2_m( @@ -174,7 +174,7 @@ void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t 
vl) // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) { - return vse8_v_i8m2_m(mask, base, value, vl); + return __riscv_vse8_v_i8m2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_i8m4_m( @@ -183,7 +183,7 @@ void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) { - return vse8_v_i8m4_m(mask, base, value, vl); + return __riscv_vse8_v_i8m4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_i8m8_m( @@ -192,7 +192,7 @@ void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) { - return vse8_v_i8m8_m(mask, base, value, vl); + return __riscv_vse8_v_i8m8_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8mf8_m( @@ -201,7 +201,7 @@ void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) { - return vse8_v_u8mf8_m(mask, base, value, vl); + return __riscv_vse8_v_u8mf8_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8mf4_m( @@ -210,7 +210,7 @@ void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) { - return vse8_v_u8mf4_m(mask, base, value, vl); + return __riscv_vse8_v_u8mf4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8mf2_m( @@ -219,7 +219,7 @@ void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) { - return vse8_v_u8mf2_m(mask, base, value, vl); 
+ return __riscv_vse8_v_u8mf2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8m1_m( @@ -228,7 +228,7 @@ void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) { - return vse8_v_u8m1_m(mask, base, value, vl); + return __riscv_vse8_v_u8m1_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8m2_m( @@ -237,7 +237,7 @@ void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t v // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) { - return vse8_v_u8m2_m(mask, base, value, vl); + return __riscv_vse8_v_u8m2_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8m4_m( @@ -246,7 +246,7 @@ void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t v // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) { - return vse8_v_u8m4_m(mask, base, value, vl); + return __riscv_vse8_v_u8m4_m(mask, base, value, vl); } // CHECK-RV64-LABEL: @test_vse8_v_u8m8_m( @@ -255,6 +255,6 @@ void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t v // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) { - return vse8_v_u8m8_m(mask, base, value, vl); + return __riscv_vse8_v_u8m8_m(mask, base, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vset.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vset.c index e3837d3efccf..f9cae45caea5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vset.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vset.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t 
test_vset_v_f16m1_f16m2(vfloat16m2_t dest, size_t index, vfloat16m1_t val) { - return vset_v_f16m1_f16m2(dest, 0, val); + return __riscv_vset_v_f16m1_f16m2(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m4( @@ -22,7 +22,7 @@ vfloat16m2_t test_vset_v_f16m1_f16m2(vfloat16m2_t dest, size_t index, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vset_v_f16m1_f16m4(vfloat16m4_t dest, size_t index, vfloat16m1_t val) { - return vset_v_f16m1_f16m4(dest, 0, val); + return __riscv_vset_v_f16m1_f16m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m4( @@ -31,7 +31,7 @@ vfloat16m4_t test_vset_v_f16m1_f16m4(vfloat16m4_t dest, size_t index, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vset_v_f16m2_f16m4(vfloat16m4_t dest, size_t index, vfloat16m2_t val) { - return vset_v_f16m2_f16m4(dest, 0, val); + return __riscv_vset_v_f16m2_f16m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m8( @@ -40,7 +40,7 @@ vfloat16m4_t test_vset_v_f16m2_f16m4(vfloat16m4_t dest, size_t index, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vset_v_f16m1_f16m8(vfloat16m8_t dest, size_t index, vfloat16m1_t val) { - return vset_v_f16m1_f16m8(dest, 0, val); + return __riscv_vset_v_f16m1_f16m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m8( @@ -49,7 +49,7 @@ vfloat16m8_t test_vset_v_f16m1_f16m8(vfloat16m8_t dest, size_t index, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vset_v_f16m2_f16m8(vfloat16m8_t dest, size_t index, vfloat16m2_t val) { - return vset_v_f16m2_f16m8(dest, 0, val); + return __riscv_vset_v_f16m2_f16m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f16m4_f16m8( @@ -58,7 +58,7 @@ vfloat16m8_t test_vset_v_f16m2_f16m8(vfloat16m8_t dest, size_t index, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vset_v_f16m4_f16m8(vfloat16m8_t dest, size_t index, vfloat16m4_t val) { - return vset_v_f16m4_f16m8(dest, 0, val); + return 
__riscv_vset_v_f16m4_f16m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vset_v_f16m4_f16m8(vfloat16m8_t dest, size_t index, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1_t val) { - return vset_v_f32m1_f32m2(dest, 0, val); + return __riscv_vset_v_f32m1_f32m2(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4( @@ -76,7 +76,7 @@ vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1_t val) { - return vset_v_f32m1_f32m4(dest, 0, val); + return __riscv_vset_v_f32m1_f32m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4( @@ -85,7 +85,7 @@ vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2_t val) { - return vset_v_f32m2_f32m4(dest, 0, val); + return __riscv_vset_v_f32m2_f32m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8( @@ -94,7 +94,7 @@ vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1_t val) { - return vset_v_f32m1_f32m8(dest, 0, val); + return __riscv_vset_v_f32m1_f32m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8( @@ -103,7 +103,7 @@ vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2_t val) { - return vset_v_f32m2_f32m8(dest, 0, val); + return __riscv_vset_v_f32m2_f32m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8( @@ -112,7 +112,7 @@ vfloat32m8_t 
test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4_t val) { - return vset_v_f32m4_f32m8(dest, 0, val); + return __riscv_vset_v_f32m4_f32m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2( @@ -121,7 +121,7 @@ vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1_t val) { - return vset_v_f64m1_f64m2(dest, 0, val); + return __riscv_vset_v_f64m1_f64m2(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1_t val) { - return vset_v_f64m1_f64m4(dest, 0, val); + return __riscv_vset_v_f64m1_f64m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4( @@ -139,7 +139,7 @@ vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2_t val) { - return vset_v_f64m2_f64m4(dest, 0, val); + return __riscv_vset_v_f64m2_f64m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8( @@ -148,7 +148,7 @@ vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1_t val) { - return vset_v_f64m1_f64m8(dest, 0, val); + return __riscv_vset_v_f64m1_f64m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8( @@ -157,7 +157,7 @@ vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2_t val) { - return vset_v_f64m2_f64m8(dest, 0, val); + return __riscv_vset_v_f64m2_f64m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8( @@ -166,7 +166,7 @@ vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t val) { - return vset_v_f64m4_f64m8(dest, 0, val); + return __riscv_vset_v_f64m4_f64m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2( @@ -175,7 +175,7 @@ vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) { - return vset_v_i8m1_i8m2(dest, 0, val); + return __riscv_vset_v_i8m1_i8m2(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4( @@ -184,7 +184,7 @@ vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) { - return vset_v_i8m1_i8m4(dest, 0, val); + return __riscv_vset_v_i8m1_i8m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4( @@ -193,7 +193,7 @@ vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) { - return vset_v_i8m2_i8m4(dest, 0, val); + return __riscv_vset_v_i8m2_i8m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8( @@ -202,7 +202,7 @@ vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) { - return vset_v_i8m1_i8m8(dest, 0, val); + return __riscv_vset_v_i8m1_i8m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8( @@ 
-211,7 +211,7 @@ vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) { - return vset_v_i8m2_i8m8(dest, 0, val); + return __riscv_vset_v_i8m2_i8m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8( @@ -220,7 +220,7 @@ vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) { - return vset_v_i8m4_i8m8(dest, 0, val); + return __riscv_vset_v_i8m4_i8m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2( @@ -229,7 +229,7 @@ vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val) { - return vset_v_i16m1_i16m2(dest, 0, val); + return __riscv_vset_v_i16m1_i16m2(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4( @@ -238,7 +238,7 @@ vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val) { - return vset_v_i16m1_i16m4(dest, 0, val); + return __riscv_vset_v_i16m1_i16m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4( @@ -247,7 +247,7 @@ vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val) { - return vset_v_i16m2_i16m4(dest, 0, val); + return __riscv_vset_v_i16m2_i16m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8( @@ -256,7 +256,7 @@ vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, 
vint16m1_t val) { - return vset_v_i16m1_i16m8(dest, 0, val); + return __riscv_vset_v_i16m1_i16m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8( @@ -265,7 +265,7 @@ vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val) { - return vset_v_i16m2_i16m8(dest, 0, val); + return __riscv_vset_v_i16m2_i16m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8( @@ -274,7 +274,7 @@ vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val) { - return vset_v_i16m4_i16m8(dest, 0, val); + return __riscv_vset_v_i16m4_i16m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2( @@ -283,7 +283,7 @@ vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val) { - return vset_v_i32m1_i32m2(dest, 0, val); + return __riscv_vset_v_i32m1_i32m2(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4( @@ -292,7 +292,7 @@ vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val) { - return vset_v_i32m1_i32m4(dest, 0, val); + return __riscv_vset_v_i32m1_i32m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4( @@ -301,7 +301,7 @@ vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val) { - return vset_v_i32m2_i32m4(dest, 0, val); + return __riscv_vset_v_i32m2_i32m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8( @@ -310,7 +310,7 
@@ vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val) { - return vset_v_i32m1_i32m8(dest, 0, val); + return __riscv_vset_v_i32m1_i32m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8( @@ -319,7 +319,7 @@ vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val) { - return vset_v_i32m2_i32m8(dest, 0, val); + return __riscv_vset_v_i32m2_i32m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8( @@ -328,7 +328,7 @@ vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val) { - return vset_v_i32m4_i32m8(dest, 0, val); + return __riscv_vset_v_i32m4_i32m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2( @@ -337,7 +337,7 @@ vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val) { - return vset_v_i64m1_i64m2(dest, 0, val); + return __riscv_vset_v_i64m1_i64m2(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4( @@ -346,7 +346,7 @@ vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val) { - return vset_v_i64m1_i64m4(dest, 0, val); + return __riscv_vset_v_i64m1_i64m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4( @@ -355,7 +355,7 @@ vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t 
index, vint64m2_t val) { - return vset_v_i64m2_i64m4(dest, 0, val); + return __riscv_vset_v_i64m2_i64m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8( @@ -364,7 +364,7 @@ vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val) { - return vset_v_i64m1_i64m8(dest, 0, val); + return __riscv_vset_v_i64m1_i64m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8( @@ -373,7 +373,7 @@ vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val) { - return vset_v_i64m2_i64m8(dest, 0, val); + return __riscv_vset_v_i64m2_i64m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8( @@ -382,7 +382,7 @@ vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val) { - return vset_v_i64m4_i64m8(dest, 0, val); + return __riscv_vset_v_i64m4_i64m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2( @@ -391,7 +391,7 @@ vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val) { - return vset_v_u8m1_u8m2(dest, 0, val); + return __riscv_vset_v_u8m1_u8m2(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4( @@ -400,7 +400,7 @@ vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val) { - return vset_v_u8m1_u8m4(dest, 0, val); + return __riscv_vset_v_u8m1_u8m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4( @@ -409,7 +409,7 @@ vuint8m4_t 
test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val) { - return vset_v_u8m2_u8m4(dest, 0, val); + return __riscv_vset_v_u8m2_u8m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8( @@ -418,7 +418,7 @@ vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val) { - return vset_v_u8m1_u8m8(dest, 0, val); + return __riscv_vset_v_u8m1_u8m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8( @@ -427,7 +427,7 @@ vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val) { - return vset_v_u8m2_u8m8(dest, 0, val); + return __riscv_vset_v_u8m2_u8m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8( @@ -436,7 +436,7 @@ vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val) { - return vset_v_u8m4_u8m8(dest, 0, val); + return __riscv_vset_v_u8m4_u8m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2( @@ -445,7 +445,7 @@ vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t val) { - return vset_v_u16m1_u16m2(dest, 0, val); + return __riscv_vset_v_u16m1_u16m2(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4( @@ -454,7 +454,7 @@ vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t val) { - return 
vset_v_u16m1_u16m4(dest, 0, val); + return __riscv_vset_v_u16m1_u16m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4( @@ -463,7 +463,7 @@ vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t val) { - return vset_v_u16m2_u16m4(dest, 0, val); + return __riscv_vset_v_u16m2_u16m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8( @@ -472,7 +472,7 @@ vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t val) { - return vset_v_u16m1_u16m8(dest, 0, val); + return __riscv_vset_v_u16m1_u16m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8( @@ -481,7 +481,7 @@ vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t val) { - return vset_v_u16m2_u16m8(dest, 0, val); + return __riscv_vset_v_u16m2_u16m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8( @@ -490,7 +490,7 @@ vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t val) { - return vset_v_u16m4_u16m8(dest, 0, val); + return __riscv_vset_v_u16m4_u16m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2( @@ -499,7 +499,7 @@ vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t val) { - return vset_v_u32m1_u32m2(dest, 0, val); + return __riscv_vset_v_u32m1_u32m2(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4( @@ -508,7 +508,7 @@ vuint32m2_t 
test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t val) { - return vset_v_u32m1_u32m4(dest, 0, val); + return __riscv_vset_v_u32m1_u32m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4( @@ -517,7 +517,7 @@ vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t val) { - return vset_v_u32m2_u32m4(dest, 0, val); + return __riscv_vset_v_u32m2_u32m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8( @@ -526,7 +526,7 @@ vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t val) { - return vset_v_u32m1_u32m8(dest, 0, val); + return __riscv_vset_v_u32m1_u32m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8( @@ -535,7 +535,7 @@ vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t val) { - return vset_v_u32m2_u32m8(dest, 0, val); + return __riscv_vset_v_u32m2_u32m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8( @@ -544,7 +544,7 @@ vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t val) { - return vset_v_u32m4_u32m8(dest, 0, val); + return __riscv_vset_v_u32m4_u32m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2( @@ -553,7 +553,7 @@ vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t 
index, vuint64m1_t val) { - return vset_v_u64m1_u64m2(dest, 0, val); + return __riscv_vset_v_u64m1_u64m2(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4( @@ -562,7 +562,7 @@ vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t val) { - return vset_v_u64m1_u64m4(dest, 0, val); + return __riscv_vset_v_u64m1_u64m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4( @@ -571,7 +571,7 @@ vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t val) { - return vset_v_u64m2_u64m4(dest, 0, val); + return __riscv_vset_v_u64m2_u64m4(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8( @@ -580,7 +580,7 @@ vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t val) { - return vset_v_u64m1_u64m8(dest, 0, val); + return __riscv_vset_v_u64m1_u64m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8( @@ -589,7 +589,7 @@ vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t val) { - return vset_v_u64m2_u64m8(dest, 0, val); + return __riscv_vset_v_u64m2_u64m8(dest, 0, val); } // CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8( @@ -598,6 +598,6 @@ vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t val) { - return vset_v_u64m4_u64m8(dest, 0, val); + return __riscv_vset_v_u64m4_u64m8(dest, 0, val); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext.c index f833f1f145e6..d57ede5900f9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) { - return vsext_vf2_i16mf4(op1, vl); + return __riscv_vsext_vf2_i16mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2( @@ -21,7 +21,7 @@ vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) { - return vsext_vf2_i16mf2(op1, vl); + return __riscv_vsext_vf2_i16mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1( @@ -30,7 +30,7 @@ vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) { - return vsext_vf2_i16m1(op1, vl); + return __riscv_vsext_vf2_i16m1(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2( @@ -39,7 +39,7 @@ vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) { - return vsext_vf2_i16m2(op1, vl); + return __riscv_vsext_vf2_i16m2(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4( @@ -48,7 +48,7 @@ vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) { - return vsext_vf2_i16m4(op1, vl); + return __riscv_vsext_vf2_i16m4(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8( @@ -57,7 +57,7 @@ vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) { - return vsext_vf2_i16m8(op1, vl); + return __riscv_vsext_vf2_i16m8(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2( @@ -66,7 +66,7 @@ vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) { - return vsext_vf4_i32mf2(op1, vl); + return __riscv_vsext_vf4_i32mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1( @@ -75,7 +75,7 @@ vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) { - return vsext_vf4_i32m1(op1, vl); + return __riscv_vsext_vf4_i32m1(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2( @@ -84,7 +84,7 @@ vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) { - return vsext_vf4_i32m2(op1, vl); + return __riscv_vsext_vf4_i32m2(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4( @@ -93,7 +93,7 @@ vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) { - return vsext_vf4_i32m4(op1, vl); + return __riscv_vsext_vf4_i32m4(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8( @@ -102,7 +102,7 @@ vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) { - return vsext_vf4_i32m8(op1, vl); + return __riscv_vsext_vf4_i32m8(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1( @@ -111,7 +111,7 @@ vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) { - return vsext_vf8_i64m1(op1, vl); + return __riscv_vsext_vf8_i64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2( @@ -120,7 +120,7 @@ 
vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) { - return vsext_vf8_i64m2(op1, vl); + return __riscv_vsext_vf8_i64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4( @@ -129,7 +129,7 @@ vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) { - return vsext_vf8_i64m4(op1, vl); + return __riscv_vsext_vf8_i64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8( @@ -138,7 +138,7 @@ vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) { - return vsext_vf8_i64m8(op1, vl); + return __riscv_vsext_vf8_i64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2( @@ -147,7 +147,7 @@ vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) { - return vsext_vf2_i32mf2(op1, vl); + return __riscv_vsext_vf2_i32mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1( @@ -156,7 +156,7 @@ vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) { - return vsext_vf2_i32m1(op1, vl); + return __riscv_vsext_vf2_i32m1(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2( @@ -165,7 +165,7 @@ vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) { - return vsext_vf2_i32m2(op1, vl); + return __riscv_vsext_vf2_i32m2(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4( @@ -174,7 +174,7 @@ vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) { - return 
vsext_vf2_i32m4(op1, vl); + return __riscv_vsext_vf2_i32m4(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8( @@ -183,7 +183,7 @@ vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) { - return vsext_vf2_i32m8(op1, vl); + return __riscv_vsext_vf2_i32m8(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1( @@ -192,7 +192,7 @@ vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) { - return vsext_vf4_i64m1(op1, vl); + return __riscv_vsext_vf4_i64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2( @@ -201,7 +201,7 @@ vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) { - return vsext_vf4_i64m2(op1, vl); + return __riscv_vsext_vf4_i64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4( @@ -210,7 +210,7 @@ vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) { - return vsext_vf4_i64m4(op1, vl); + return __riscv_vsext_vf4_i64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8( @@ -219,7 +219,7 @@ vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) { - return vsext_vf4_i64m8(op1, vl); + return __riscv_vsext_vf4_i64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1( @@ -228,7 +228,7 @@ vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) { - return vsext_vf2_i64m1(op1, vl); + return __riscv_vsext_vf2_i64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2( @@ -237,7 +237,7 @@ vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t 
op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) { - return vsext_vf2_i64m2(op1, vl); + return __riscv_vsext_vf2_i64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4( @@ -246,7 +246,7 @@ vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) { - return vsext_vf2_i64m4(op1, vl); + return __riscv_vsext_vf2_i64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8( @@ -255,7 +255,7 @@ vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) { - return vsext_vf2_i64m8(op1, vl); + return __riscv_vsext_vf2_i64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m( @@ -264,7 +264,7 @@ vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return vsext_vf2_i16mf4_m(mask, op1, vl); + return __riscv_vsext_vf2_i16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m( @@ -273,7 +273,7 @@ vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return vsext_vf2_i16mf2_m(mask, op1, vl); + return __riscv_vsext_vf2_i16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m( @@ -282,7 +282,7 @@ vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return vsext_vf2_i16m1_m(mask, op1, vl); + return __riscv_vsext_vf2_i16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m( @@ -291,7 +291,7 @@ vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint8mf2_t op1, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint8m1_t op1, size_t vl) { - return vsext_vf2_i16m2_m(mask, op1, vl); + return __riscv_vsext_vf2_i16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m( @@ -300,7 +300,7 @@ vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint8m2_t op1, size_t vl) { - return vsext_vf2_i16m4_m(mask, op1, vl); + return __riscv_vsext_vf2_i16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m( @@ -309,7 +309,7 @@ vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint8m4_t op1, size_t vl) { - return vsext_vf2_i16m8_m(mask, op1, vl); + return __riscv_vsext_vf2_i16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m( @@ -318,7 +318,7 @@ vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint8m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return vsext_vf4_i32mf2_m(mask, op1, vl); + return __riscv_vsext_vf4_i32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m( @@ -327,7 +327,7 @@ vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return vsext_vf4_i32m1_m(mask, op1, vl); + return __riscv_vsext_vf4_i32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m( @@ -336,7 +336,7 @@ vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return vsext_vf4_i32m2_m(mask, op1, vl); + return __riscv_vsext_vf4_i32m2_m(mask, op1, vl); } 
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m( @@ -345,7 +345,7 @@ vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint8m1_t op1, size_t vl) { - return vsext_vf4_i32m4_m(mask, op1, vl); + return __riscv_vsext_vf4_i32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m( @@ -354,7 +354,7 @@ vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint8m2_t op1, size_t vl) { - return vsext_vf4_i32m8_m(mask, op1, vl); + return __riscv_vsext_vf4_i32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m( @@ -363,7 +363,7 @@ vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return vsext_vf8_i64m1_m(mask, op1, vl); + return __riscv_vsext_vf8_i64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m( @@ -372,7 +372,7 @@ vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return vsext_vf8_i64m2_m(mask, op1, vl); + return __riscv_vsext_vf8_i64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m( @@ -381,7 +381,7 @@ vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return vsext_vf8_i64m4_m(mask, op1, vl); + return __riscv_vsext_vf8_i64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m( @@ -390,7 +390,7 @@ vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t 
mask, vint8m1_t op1, size_t vl) { - return vsext_vf8_i64m8_m(mask, op1, vl); + return __riscv_vsext_vf8_i64m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m( @@ -399,7 +399,7 @@ vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { - return vsext_vf2_i32mf2_m(mask, op1, vl); + return __riscv_vsext_vf2_i32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m( @@ -408,7 +408,7 @@ vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint16mf4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { - return vsext_vf2_i32m1_m(mask, op1, vl); + return __riscv_vsext_vf2_i32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m( @@ -417,7 +417,7 @@ vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint16m1_t op1, size_t vl) { - return vsext_vf2_i32m2_m(mask, op1, vl); + return __riscv_vsext_vf2_i32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m( @@ -426,7 +426,7 @@ vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint16m2_t op1, size_t vl) { - return vsext_vf2_i32m4_m(mask, op1, vl); + return __riscv_vsext_vf2_i32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m( @@ -435,7 +435,7 @@ vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint16m4_t op1, size_t vl) { - return vsext_vf2_i32m8_m(mask, op1, vl); + return __riscv_vsext_vf2_i32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m( @@ -444,7 +444,7 @@ vint32m8_t 
test_vsext_vf2_i32m8_m(vbool4_t mask, vint16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { - return vsext_vf4_i64m1_m(mask, op1, vl); + return __riscv_vsext_vf4_i64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m( @@ -453,7 +453,7 @@ vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { - return vsext_vf4_i64m2_m(mask, op1, vl); + return __riscv_vsext_vf4_i64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m( @@ -462,7 +462,7 @@ vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint16m1_t op1, size_t vl) { - return vsext_vf4_i64m4_m(mask, op1, vl); + return __riscv_vsext_vf4_i64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m( @@ -471,7 +471,7 @@ vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint16m2_t op1, size_t vl) { - return vsext_vf4_i64m8_m(mask, op1, vl); + return __riscv_vsext_vf4_i64m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m( @@ -480,7 +480,7 @@ vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { - return vsext_vf2_i64m1_m(mask, op1, vl); + return __riscv_vsext_vf2_i64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m( @@ -489,7 +489,7 @@ vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint32m1_t op1, size_t vl) { - return vsext_vf2_i64m2_m(mask, 
op1, vl); + return __riscv_vsext_vf2_i64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m( @@ -498,7 +498,7 @@ vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint32m2_t op1, size_t vl) { - return vsext_vf2_i64m4_m(mask, op1, vl); + return __riscv_vsext_vf2_i64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m( @@ -507,6 +507,6 @@ vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint32m4_t op1, size_t vl) { - return vsext_vf2_i64m8_m(mask, op1, vl); + return __riscv_vsext_vf2_i64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1down.c index f6c97d6b09fe..6d1c8552537a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1down.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf8(src, value, vl); + return __riscv_vslide1down_vx_i8mf8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4( @@ -21,7 +21,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf4(src, value, vl); + return __riscv_vslide1down_vx_i8mf4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2( @@ -30,7 +30,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf2(src, value, vl); + return __riscv_vslide1down_vx_i8mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1( @@ -39,7 +39,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m1(src, value, vl); + return __riscv_vslide1down_vx_i8m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2( @@ -48,7 +48,7 @@ vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m2(src, value, vl); + return __riscv_vslide1down_vx_i8m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4( @@ -57,7 +57,7 @@ vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m4(src, value, vl); + return __riscv_vslide1down_vx_i8m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8( @@ -66,7 +66,7 @@ vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m8(src, value, vl); + return __riscv_vslide1down_vx_i8m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4( @@ -75,7 +75,7 @@ vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf4(src, value, vl); + return __riscv_vslide1down_vx_i16mf4(src, value, vl); 
} // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2( @@ -84,7 +84,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf2(src, value, vl); + return __riscv_vslide1down_vx_i16mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1( @@ -93,7 +93,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m1(src, value, vl); + return __riscv_vslide1down_vx_i16m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2( @@ -102,7 +102,7 @@ vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m2(src, value, vl); + return __riscv_vslide1down_vx_i16m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4( @@ -111,7 +111,7 @@ vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m4(src, value, vl); + return __riscv_vslide1down_vx_i16m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8( @@ -120,7 +120,7 @@ vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m8(src, value, vl); + return __riscv_vslide1down_vx_i16m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2( @@ -129,7 +129,7 @@ vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32mf2(src, value, vl); + return __riscv_vslide1down_vx_i32mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1( @@ -138,7 +138,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m1(src, value, vl); + return __riscv_vslide1down_vx_i32m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2( @@ -147,7 +147,7 @@ vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m2(src, value, vl); + return __riscv_vslide1down_vx_i32m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4( @@ -156,7 +156,7 @@ vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m4(src, value, vl); + return __riscv_vslide1down_vx_i32m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8( @@ -165,7 +165,7 @@ vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m8(src, value, vl); + return __riscv_vslide1down_vx_i32m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1( @@ -174,7 +174,7 @@ vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { - return 
vslide1down_vx_i64m1(src, value, vl); + return __riscv_vslide1down_vx_i64m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2( @@ -183,7 +183,7 @@ vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m2(src, value, vl); + return __riscv_vslide1down_vx_i64m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4( @@ -192,7 +192,7 @@ vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m4(src, value, vl); + return __riscv_vslide1down_vx_i64m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8( @@ -201,7 +201,7 @@ vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m8(src, value, vl); + return __riscv_vslide1down_vx_i64m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8( @@ -210,7 +210,7 @@ vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf8(src, value, vl); + return __riscv_vslide1down_vx_u8mf8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4( @@ -219,7 +219,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf4(src, value, vl); + return __riscv_vslide1down_vx_u8mf4(src, value, vl); } // CHECK-RV64-LABEL: 
@test_vslide1down_vx_u8mf2( @@ -228,7 +228,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf2(src, value, vl); + return __riscv_vslide1down_vx_u8mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1( @@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m1(src, value, vl); + return __riscv_vslide1down_vx_u8m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2( @@ -246,7 +246,7 @@ vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m2(src, value, vl); + return __riscv_vslide1down_vx_u8m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4( @@ -255,7 +255,7 @@ vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m4(src, value, vl); + return __riscv_vslide1down_vx_u8m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8( @@ -264,7 +264,7 @@ vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m8(src, value, vl); + return __riscv_vslide1down_vx_u8m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4( @@ -273,7 +273,7 @@ vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16mf4(src, value, vl); + return __riscv_vslide1down_vx_u16mf4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2( @@ -282,7 +282,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16mf2(src, value, vl); + return __riscv_vslide1down_vx_u16mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1( @@ -291,7 +291,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m1(src, value, vl); + return __riscv_vslide1down_vx_u16m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2( @@ -300,7 +300,7 @@ vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m2(src, value, vl); + return __riscv_vslide1down_vx_u16m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4( @@ -309,7 +309,7 @@ vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m4(src, value, vl); + return __riscv_vslide1down_vx_u16m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8( @@ -318,7 +318,7 @@ vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl) { - return 
vslide1down_vx_u16m8(src, value, vl); + return __riscv_vslide1down_vx_u16m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2( @@ -327,7 +327,7 @@ vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32mf2(src, value, vl); + return __riscv_vslide1down_vx_u32mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1( @@ -336,7 +336,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m1(src, value, vl); + return __riscv_vslide1down_vx_u32m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2( @@ -345,7 +345,7 @@ vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m2(src, value, vl); + return __riscv_vslide1down_vx_u32m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4( @@ -354,7 +354,7 @@ vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m4(src, value, vl); + return __riscv_vslide1down_vx_u32m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8( @@ -363,7 +363,7 @@ vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m8(src, value, vl); + return __riscv_vslide1down_vx_u32m8(src, value, vl); } // CHECK-RV64-LABEL: 
@test_vslide1down_vx_u64m1( @@ -372,7 +372,7 @@ vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m1(src, value, vl); + return __riscv_vslide1down_vx_u64m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2( @@ -381,7 +381,7 @@ vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m2(src, value, vl); + return __riscv_vslide1down_vx_u64m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4( @@ -390,7 +390,7 @@ vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m4(src, value, vl); + return __riscv_vslide1down_vx_u64m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m8(src, value, vl); + return __riscv_vslide1down_vx_u64m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf8_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i8mf8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_m( @@ -417,7 +417,7 @@ vint8mf8_t 
test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t va // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf4_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i8mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_m( @@ -426,7 +426,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t va // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i8mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_m( @@ -435,7 +435,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t va // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m1_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i8m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_m( @@ -444,7 +444,7 @@ vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i8m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_m( @@ -453,7 +453,7 @@ vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m4_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i8m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_m( @@ -462,7 +462,7 @@ 
vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m8_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i8m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_m( @@ -471,7 +471,7 @@ vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf4_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i16mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_m( @@ -480,7 +480,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i16mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_m( @@ -489,7 +489,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m1_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i16m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_m( @@ -498,7 +498,7 @@ vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i16m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: 
@test_vslide1down_vx_i16m4_m( @@ -507,7 +507,7 @@ vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t va // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m4_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i16m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_m( @@ -516,7 +516,7 @@ vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t va // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m8_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i16m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_m( @@ -525,7 +525,7 @@ vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t va // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32mf2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i32mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_m( @@ -534,7 +534,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m1_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i32m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_m( @@ -543,7 +543,7 @@ vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i32m2_m(mask, 
src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_m( @@ -552,7 +552,7 @@ vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m4_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i32m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_m( @@ -561,7 +561,7 @@ vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t va // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m8_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i32m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_m( @@ -570,7 +570,7 @@ vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t va // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m1_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i64m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_m( @@ -579,7 +579,7 @@ vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i64m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_m( @@ -588,7 +588,7 @@ vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m4_m(mask, src, value, vl); + return 
__riscv_vslide1down_vx_i64m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_m( @@ -597,7 +597,7 @@ vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m8_m(mask, src, value, vl); + return __riscv_vslide1down_vx_i64m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_m( @@ -606,7 +606,7 @@ vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t va // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf8_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u8mf8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_m( @@ -615,7 +615,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf4_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u8mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_m( @@ -624,7 +624,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u8mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_m( @@ -633,7 +633,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) { - return 
vslide1down_vx_u8m1_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u8m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_m( @@ -642,7 +642,7 @@ vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u8m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_m( @@ -651,7 +651,7 @@ vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m4_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u8m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_m( @@ -660,7 +660,7 @@ vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m8_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u8m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_m( @@ -669,7 +669,7 @@ vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16mf4_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u16mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_m( @@ -678,7 +678,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, 
uint16_t value, size_t vl) { - return vslide1down_vx_u16mf2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u16mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_m( @@ -687,7 +687,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m1_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u16m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_m( @@ -696,7 +696,7 @@ vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u16m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_m( @@ -705,7 +705,7 @@ vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m4_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u16m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_m( @@ -714,7 +714,7 @@ vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m8_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u16m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_m( @@ -723,7 +723,7 @@ vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vslide1down_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32mf2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u32mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_m( @@ -732,7 +732,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m1_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u32m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_m( @@ -741,7 +741,7 @@ vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u32m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_m( @@ -750,7 +750,7 @@ vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m4_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u32m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_m( @@ -759,7 +759,7 @@ vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m8_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u32m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_m( @@ -768,7 +768,7 @@ vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, 
uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m1_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u64m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_m( @@ -777,7 +777,7 @@ vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m2_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u64m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_m( @@ -786,7 +786,7 @@ vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m4_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u64m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m8_m(mask, src, value, vl); + return __riscv_vslide1down_vx_u64m8_m(mask, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1up.c index 2ed0ed20ab3c..d11684f25ff7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslide1up.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf8(src, value, vl); + return __riscv_vslide1up_vx_i8mf8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4( @@ -21,7 +21,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf4(src, value, vl); + return __riscv_vslide1up_vx_i8mf4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2( @@ -30,7 +30,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf2(src, value, vl); + return __riscv_vslide1up_vx_i8mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1( @@ -39,7 +39,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m1(src, value, vl); + return __riscv_vslide1up_vx_i8m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2( @@ -48,7 +48,7 @@ vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m2(src, value, vl); + return __riscv_vslide1up_vx_i8m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4( @@ -57,7 +57,7 @@ vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m4(src, value, vl); + return __riscv_vslide1up_vx_i8m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8( @@ -66,7 +66,7 @@ 
vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m8(src, value, vl); + return __riscv_vslide1up_vx_i8m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4( @@ -75,7 +75,7 @@ vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf4(src, value, vl); + return __riscv_vslide1up_vx_i16mf4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2( @@ -84,7 +84,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf2(src, value, vl); + return __riscv_vslide1up_vx_i16mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1( @@ -93,7 +93,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m1(src, value, vl); + return __riscv_vslide1up_vx_i16m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2( @@ -102,7 +102,7 @@ vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m2(src, value, vl); + return __riscv_vslide1up_vx_i16m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4( @@ -111,7 +111,7 @@ vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { - 
return vslide1up_vx_i16m4(src, value, vl); + return __riscv_vslide1up_vx_i16m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8( @@ -120,7 +120,7 @@ vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m8(src, value, vl); + return __riscv_vslide1up_vx_i16m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2( @@ -129,7 +129,7 @@ vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32mf2(src, value, vl); + return __riscv_vslide1up_vx_i32mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1( @@ -138,7 +138,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m1(src, value, vl); + return __riscv_vslide1up_vx_i32m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2( @@ -147,7 +147,7 @@ vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m2(src, value, vl); + return __riscv_vslide1up_vx_i32m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4( @@ -156,7 +156,7 @@ vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m4(src, value, vl); + return __riscv_vslide1up_vx_i32m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8( @@ -165,7 +165,7 @@ vint32m4_t 
test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m8(src, value, vl); + return __riscv_vslide1up_vx_i32m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1( @@ -174,7 +174,7 @@ vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m1(src, value, vl); + return __riscv_vslide1up_vx_i64m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2( @@ -183,7 +183,7 @@ vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m2(src, value, vl); + return __riscv_vslide1up_vx_i64m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4( @@ -192,7 +192,7 @@ vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m4(src, value, vl); + return __riscv_vslide1up_vx_i64m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8( @@ -201,7 +201,7 @@ vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m8(src, value, vl); + return __riscv_vslide1up_vx_i64m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8( @@ -210,7 +210,7 @@ vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { - return 
vslide1up_vx_u8mf8(src, value, vl); + return __riscv_vslide1up_vx_u8mf8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4( @@ -219,7 +219,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf4(src, value, vl); + return __riscv_vslide1up_vx_u8mf4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2( @@ -228,7 +228,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf2(src, value, vl); + return __riscv_vslide1up_vx_u8mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1( @@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m1(src, value, vl); + return __riscv_vslide1up_vx_u8m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2( @@ -246,7 +246,7 @@ vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m2(src, value, vl); + return __riscv_vslide1up_vx_u8m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4( @@ -255,7 +255,7 @@ vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m4(src, value, vl); + return __riscv_vslide1up_vx_u8m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8( @@ -264,7 +264,7 @@ vuint8m4_t 
test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m8(src, value, vl); + return __riscv_vslide1up_vx_u8m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4( @@ -273,7 +273,7 @@ vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16mf4(src, value, vl); + return __riscv_vslide1up_vx_u16mf4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2( @@ -282,7 +282,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16mf2(src, value, vl); + return __riscv_vslide1up_vx_u16mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1( @@ -291,7 +291,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m1(src, value, vl); + return __riscv_vslide1up_vx_u16m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2( @@ -300,7 +300,7 @@ vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m2(src, value, vl); + return __riscv_vslide1up_vx_u16m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4( @@ -309,7 +309,7 @@ vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t 
value, size_t vl) { - return vslide1up_vx_u16m4(src, value, vl); + return __riscv_vslide1up_vx_u16m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8( @@ -318,7 +318,7 @@ vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m8(src, value, vl); + return __riscv_vslide1up_vx_u16m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2( @@ -327,7 +327,7 @@ vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32mf2(src, value, vl); + return __riscv_vslide1up_vx_u32mf2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1( @@ -336,7 +336,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m1(src, value, vl); + return __riscv_vslide1up_vx_u32m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2( @@ -345,7 +345,7 @@ vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m2(src, value, vl); + return __riscv_vslide1up_vx_u32m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4( @@ -354,7 +354,7 @@ vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m4(src, value, vl); + return __riscv_vslide1up_vx_u32m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8( 
@@ -363,7 +363,7 @@ vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m8(src, value, vl); + return __riscv_vslide1up_vx_u32m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1( @@ -372,7 +372,7 @@ vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m1(src, value, vl); + return __riscv_vslide1up_vx_u64m1(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2( @@ -381,7 +381,7 @@ vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m2(src, value, vl); + return __riscv_vslide1up_vx_u64m2(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4( @@ -390,7 +390,7 @@ vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m4(src, value, vl); + return __riscv_vslide1up_vx_u64m4(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m8(src, value, vl); + return __riscv_vslide1up_vx_u64m8(src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf8_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i8mf8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t valu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf4_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i8mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_m( @@ -426,7 +426,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t valu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i8mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_m( @@ -435,7 +435,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t valu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m1_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i8m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_m( @@ -444,7 +444,7 @@ vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i8m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_m( @@ -453,7 +453,7 @@ vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t 
test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m4_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i8m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_m( @@ -462,7 +462,7 @@ vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m8_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i8m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_m( @@ -471,7 +471,7 @@ vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf4_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i16mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_m( @@ -480,7 +480,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i16mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_m( @@ -489,7 +489,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m1_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i16m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_m( @@ -498,7 +498,7 @@ vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i16m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_m( @@ -507,7 +507,7 @@ vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t valu // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m4_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i16m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_m( @@ -516,7 +516,7 @@ vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t valu // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m8_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i16m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_m( @@ -525,7 +525,7 @@ vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t valu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32mf2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i32mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_m( @@ -534,7 +534,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m1_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i32m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_m( @@ -543,7 +543,7 @@ vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i32m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_m( @@ -552,7 +552,7 @@ vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m4_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i32m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_m( @@ -561,7 +561,7 @@ vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t valu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m8_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i32m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_m( @@ -570,7 +570,7 @@ vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t valu // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m1_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i64m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_m( @@ -579,7 +579,7 @@ vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i64m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_m( @@ -588,7 +588,7 @@ vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m4_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i64m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_m( @@ -597,7 +597,7 @@ vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t val // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m8_m(mask, src, value, vl); + return __riscv_vslide1up_vx_i64m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_m( @@ -606,7 +606,7 @@ vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t valu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf8_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u8mf8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_m( @@ -615,7 +615,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf4_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u8mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_m( @@ -624,7 +624,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u8mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_m( @@ -633,7 +633,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m1_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u8m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_m( @@ -642,7 +642,7 @@ vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u8m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_m( @@ -651,7 +651,7 @@ vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m4_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u8m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_m( @@ -660,7 +660,7 @@ vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m8_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u8m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_m( @@ -669,7 +669,7 @@ vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16mf4_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u16mf4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_m( @@ -678,7 +678,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16mf2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u16mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_m( @@ -687,7 +687,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m1_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u16m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_m( @@ -696,7 +696,7 @@ vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u16m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_m( @@ -705,7 +705,7 @@ vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m4_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u16m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_m( @@ -714,7 +714,7 @@ vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m8_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u16m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_m( @@ -723,7 +723,7 @@ vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t v // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32mf2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u32mf2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_m( @@ -732,7 +732,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m1_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u32m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_m( @@ -741,7 +741,7 @@ vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u32m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_m( @@ -750,7 +750,7 @@ vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m4_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u32m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_m( @@ -759,7 +759,7 @@ vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m8_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u32m8_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_m( @@ -768,7 +768,7 @@ vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t v // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m1_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u64m1_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_m( @@ -777,7 +777,7 @@ vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m2_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u64m2_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_m( @@ -786,7 +786,7 @@ vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m4_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u64m4_m(mask, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m8_m(mask, src, value, vl); + return __riscv_vslide1up_vx_u64m8_m(mask, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslidedown.c index b3ccddd5246b..c3ac7605e94a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslidedown.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t src, size_t offset, 
size_t vl) { - return vslidedown_vx_f16mf4(src, offset, vl); + return __riscv_vslidedown_vx_f16mf4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t src, size_t offset, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16mf2(src, offset, vl); + return __riscv_vslidedown_vx_f16mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t src, size_t offset, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m1(src, offset, vl); + return __riscv_vslidedown_vx_f16m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t src, size_t offset, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m2(src, offset, vl); + return __riscv_vslidedown_vx_f16m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t src, size_t offset, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m4(src, offset, vl); + return __riscv_vslidedown_vx_f16m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t src, size_t offset, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m8(src, offset, vl); + return __riscv_vslidedown_vx_f16m8(src, offset, vl); } // 
CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t src, size_t offset, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32mf2(src, offset, vl); + return __riscv_vslidedown_vx_f32mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t src, size_t offset, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m1(src, offset, vl); + return __riscv_vslidedown_vx_f32m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t src, size_t offset, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m2(src, offset, vl); + return __riscv_vslidedown_vx_f32m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t src, size_t offset, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m4(src, offset, vl); + return __riscv_vslidedown_vx_f32m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t src, size_t offset, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m8(src, offset, vl); + return __riscv_vslidedown_vx_f32m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t src, size_t offset, 
size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m1(src, offset, vl); + return __riscv_vslidedown_vx_f64m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t src, size_t offset, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m2(src, offset, vl); + return __riscv_vslidedown_vx_f64m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t src, size_t offset, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m4(src, offset, vl); + return __riscv_vslidedown_vx_f64m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t src, size_t offset, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m8(src, offset, vl); + return __riscv_vslidedown_vx_f64m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8( @@ -148,7 +148,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t src, size_t offset, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf8(src, offset, vl); + return __riscv_vslidedown_vx_i8mf8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4( @@ -157,7 +157,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t src, size_t offset, size_t vl) { - return 
vslidedown_vx_i8mf4(src, offset, vl); + return __riscv_vslidedown_vx_i8mf4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2( @@ -166,7 +166,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf2(src, offset, vl); + return __riscv_vslidedown_vx_i8mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1( @@ -175,7 +175,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m1(src, offset, vl); + return __riscv_vslidedown_vx_i8m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2( @@ -184,7 +184,7 @@ vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m2(src, offset, vl); + return __riscv_vslidedown_vx_i8m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4( @@ -193,7 +193,7 @@ vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m4(src, offset, vl); + return __riscv_vslidedown_vx_i8m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8( @@ -202,7 +202,7 @@ vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m8(src, offset, vl); + return __riscv_vslidedown_vx_i8m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4( @@ -211,7 +211,7 @@ vint8m8_t 
test_vslidedown_vx_i8m8(vint8m8_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf4(src, offset, vl); + return __riscv_vslidedown_vx_i16mf4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2( @@ -220,7 +220,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf2(src, offset, vl); + return __riscv_vslidedown_vx_i16mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1( @@ -229,7 +229,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m1(src, offset, vl); + return __riscv_vslidedown_vx_i16m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2( @@ -238,7 +238,7 @@ vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m2(src, offset, vl); + return __riscv_vslidedown_vx_i16m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4( @@ -247,7 +247,7 @@ vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m4(src, offset, vl); + return __riscv_vslidedown_vx_i16m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8( @@ -256,7 +256,7 @@ vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vslidedown_vx_i16m8(vint16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m8(src, offset, vl); + return __riscv_vslidedown_vx_i16m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2( @@ -265,7 +265,7 @@ vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32mf2(src, offset, vl); + return __riscv_vslidedown_vx_i32mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1( @@ -274,7 +274,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m1(src, offset, vl); + return __riscv_vslidedown_vx_i32m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2( @@ -283,7 +283,7 @@ vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m2(src, offset, vl); + return __riscv_vslidedown_vx_i32m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4( @@ -292,7 +292,7 @@ vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m4(src, offset, vl); + return __riscv_vslidedown_vx_i32m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8( @@ -301,7 +301,7 @@ vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m8(src, offset, vl); + return 
__riscv_vslidedown_vx_i32m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1( @@ -310,7 +310,7 @@ vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m1(src, offset, vl); + return __riscv_vslidedown_vx_i64m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2( @@ -319,7 +319,7 @@ vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m2(src, offset, vl); + return __riscv_vslidedown_vx_i64m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4( @@ -328,7 +328,7 @@ vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m4(src, offset, vl); + return __riscv_vslidedown_vx_i64m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8( @@ -337,7 +337,7 @@ vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m8(src, offset, vl); + return __riscv_vslidedown_vx_i64m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8( @@ -346,7 +346,7 @@ vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf8(src, offset, vl); + return __riscv_vslidedown_vx_u8mf8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4( @@ -355,7 +355,7 @@ vuint8mf8_t 
test_vslidedown_vx_u8mf8(vuint8mf8_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf4(src, offset, vl); + return __riscv_vslidedown_vx_u8mf4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2( @@ -364,7 +364,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf2(src, offset, vl); + return __riscv_vslidedown_vx_u8mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1( @@ -373,7 +373,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m1(src, offset, vl); + return __riscv_vslidedown_vx_u8m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2( @@ -382,7 +382,7 @@ vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m2(src, offset, vl); + return __riscv_vslidedown_vx_u8m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4( @@ -391,7 +391,7 @@ vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m4(src, offset, vl); + return __riscv_vslidedown_vx_u8m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8( @@ -400,7 +400,7 @@ vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t src, size_t offset, 
size_t vl) { - return vslidedown_vx_u8m8(src, offset, vl); + return __riscv_vslidedown_vx_u8m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4( @@ -409,7 +409,7 @@ vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t src, size_t offset, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16mf4(src, offset, vl); + return __riscv_vslidedown_vx_u16mf4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2( @@ -418,7 +418,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t src, size_t offset, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16mf2(src, offset, vl); + return __riscv_vslidedown_vx_u16mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1( @@ -427,7 +427,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t src, size_t offset, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m1(src, offset, vl); + return __riscv_vslidedown_vx_u16m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2( @@ -436,7 +436,7 @@ vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m2(src, offset, vl); + return __riscv_vslidedown_vx_u16m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4( @@ -445,7 +445,7 @@ vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m4(src, offset, vl); + return __riscv_vslidedown_vx_u16m4(src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslidedown_vx_u16m8( @@ -454,7 +454,7 @@ vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m8(src, offset, vl); + return __riscv_vslidedown_vx_u16m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2( @@ -463,7 +463,7 @@ vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32mf2(src, offset, vl); + return __riscv_vslidedown_vx_u32mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1( @@ -472,7 +472,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t src, size_t offset, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m1(src, offset, vl); + return __riscv_vslidedown_vx_u32m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2( @@ -481,7 +481,7 @@ vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m2(src, offset, vl); + return __riscv_vslidedown_vx_u32m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4( @@ -490,7 +490,7 @@ vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m4(src, offset, vl); + return __riscv_vslidedown_vx_u32m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8( @@ -499,7 +499,7 @@ vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t src, size_t offset, size_t vl) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m8(src, offset, vl); + return __riscv_vslidedown_vx_u32m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1( @@ -508,7 +508,7 @@ vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m1(src, offset, vl); + return __riscv_vslidedown_vx_u64m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2( @@ -517,7 +517,7 @@ vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m2(src, offset, vl); + return __riscv_vslidedown_vx_u64m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4( @@ -526,7 +526,7 @@ vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m4(src, offset, vl); + return __riscv_vslidedown_vx_u64m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8( @@ -535,7 +535,7 @@ vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m8(src, offset, vl); + return __riscv_vslidedown_vx_u64m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_m( @@ -544,7 +544,7 @@ vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t src, size_t offset, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t offset, size_t vl) { - return 
vslidedown_vx_f16mf4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f16mf4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_m( @@ -553,7 +553,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16mf2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f16mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_m( @@ -562,7 +562,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m1_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f16m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_m( @@ -571,7 +571,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f16m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_m( @@ -580,7 +580,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f16m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_m( @@ -589,7 +589,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, 
vfloat16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m8_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f16m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m( @@ -598,7 +598,7 @@ vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32mf2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f32mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m( @@ -607,7 +607,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m1_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f32m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m( @@ -616,7 +616,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f32m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m( @@ -625,7 +625,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f32m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m( @@ -634,7 +634,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m8_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f32m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m( @@ -643,7 +643,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m1_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f64m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m( @@ -652,7 +652,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f64m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m( @@ -661,7 +661,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f64m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m( @@ -670,7 +670,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m8_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_f64m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m( @@ -679,7 +679,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t src, 
size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf8_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i8mf8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m( @@ -688,7 +688,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i8mf4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m( @@ -697,7 +697,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i8mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m( @@ -706,7 +706,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m1_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i8m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m( @@ -715,7 +715,7 @@ vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t src, size_t offset, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i8m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m( @@ -724,7 +724,7 @@ vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t 
src, size_t offset, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i8m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m( @@ -733,7 +733,7 @@ vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t src, size_t offset, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m8_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i8m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m( @@ -742,7 +742,7 @@ vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t src, size_t offset, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i16mf4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m( @@ -751,7 +751,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i16mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m( @@ -760,7 +760,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m1_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i16m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m( @@ -769,7 +769,7 @@ vint16m1_t 
test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i16m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m( @@ -778,7 +778,7 @@ vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t src, size_t offs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i16m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m( @@ -787,7 +787,7 @@ vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t src, size_t offs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m8_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i16m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m( @@ -796,7 +796,7 @@ vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t src, size_t offs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32mf2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i32mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m( @@ -805,7 +805,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m1_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i32m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m( @@ 
-814,7 +814,7 @@ vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i32m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m( @@ -823,7 +823,7 @@ vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i32m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m( @@ -832,7 +832,7 @@ vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t src, size_t offs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m8_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i32m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m( @@ -841,7 +841,7 @@ vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t src, size_t offs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m1_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i64m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m( @@ -850,7 +850,7 @@ vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i64m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslidedown_vx_i64m4_m( @@ -859,7 +859,7 @@ vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i64m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m( @@ -868,7 +868,7 @@ vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m8_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_i64m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m( @@ -877,7 +877,7 @@ vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t src, size_t offs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf8_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u8mf8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m( @@ -886,7 +886,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, size_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u8mf4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m( @@ -895,7 +895,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, size_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u8mf2_m(mask, src, 
offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m( @@ -904,7 +904,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, size_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m1_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u8m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m( @@ -913,7 +913,7 @@ vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, size_t offse // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u8m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m( @@ -922,7 +922,7 @@ vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, size_t offse // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u8m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m( @@ -931,7 +931,7 @@ vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, size_t offse // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m8_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u8m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m( @@ -940,7 +940,7 @@ vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, size_t offse // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16mf4_m(mask, src, offset, vl); + return 
__riscv_vslidedown_vx_u16mf4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m( @@ -949,7 +949,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16mf2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u16mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m( @@ -958,7 +958,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m1_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u16m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m( @@ -967,7 +967,7 @@ vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, size_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u16m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m( @@ -976,7 +976,7 @@ vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, size_t of // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u16m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m( @@ -985,7 +985,7 @@ vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, size_t of // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, size_t offset, size_t vl) { - return 
vslidedown_vx_u16m8_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u16m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m( @@ -994,7 +994,7 @@ vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, size_t of // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32mf2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u32mf2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m( @@ -1003,7 +1003,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m1_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u32m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m( @@ -1012,7 +1012,7 @@ vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, size_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u32m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m( @@ -1021,7 +1021,7 @@ vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, size_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u32m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m( @@ -1030,7 +1030,7 @@ vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, size_t of // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, 
vuint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m8_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u32m8_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m( @@ -1039,7 +1039,7 @@ vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, size_t of // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m1_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u64m1_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m( @@ -1048,7 +1048,7 @@ vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, size_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m2_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u64m2_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m( @@ -1057,7 +1057,7 @@ vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, size_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m4_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u64m4_m(mask, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m( @@ -1066,6 +1066,6 @@ vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, size_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m8_m(mask, src, offset, vl); + return __riscv_vslidedown_vx_u64m8_m(mask, src, offset, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslideup.c 
index 1530fe2b61a6..efcb1cf8abf2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslideup.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_f16mf4(dest, src, offset, vl); + return __riscv_vslideup_vx_f16mf4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_f16mf2(dest, src, offset, vl); + return __riscv_vslideup_vx_f16mf2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m1(dest, src, offset, vl); + return __riscv_vslideup_vx_f16m1(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2( @@ -40,7 +40,7 @@ vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m2(dest, src, offset, vl); + return __riscv_vslideup_vx_f16m2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4( @@ -49,7 +49,7 @@ vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { - return 
vslideup_vx_f16m4(dest, src, offset, vl); + return __riscv_vslideup_vx_f16m4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8( @@ -58,7 +58,7 @@ vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m8(dest, src, offset, vl); + return __riscv_vslideup_vx_f16m8(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2( @@ -67,7 +67,7 @@ vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_f32mf2(dest, src, offset, vl); + return __riscv_vslideup_vx_f32mf2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m1(dest, src, offset, vl); + return __riscv_vslideup_vx_f32m1(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2( @@ -85,7 +85,7 @@ vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m2(dest, src, offset, vl); + return __riscv_vslideup_vx_f32m2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4( @@ -94,7 +94,7 @@ vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m4(dest, 
src, offset, vl); + return __riscv_vslideup_vx_f32m4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8( @@ -103,7 +103,7 @@ vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m8(dest, src, offset, vl); + return __riscv_vslideup_vx_f32m8(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1( @@ -112,7 +112,7 @@ vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m1(dest, src, offset, vl); + return __riscv_vslideup_vx_f64m1(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2( @@ -121,7 +121,7 @@ vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m2(dest, src, offset, vl); + return __riscv_vslideup_vx_f64m2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4( @@ -130,7 +130,7 @@ vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m4(dest, src, offset, vl); + return __riscv_vslideup_vx_f64m4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8( @@ -139,7 +139,7 @@ vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m8(dest, src, offset, vl); + 
return __riscv_vslideup_vx_f64m8(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8( @@ -148,7 +148,7 @@ vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf8(dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf8(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4( @@ -157,7 +157,7 @@ vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf4(dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2( @@ -166,7 +166,7 @@ vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf2(dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1( @@ -175,7 +175,7 @@ vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m1(dest, src, offset, vl); + return __riscv_vslideup_vx_i8m1(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2( @@ -184,7 +184,7 @@ vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m2(dest, src, offset, vl); + return __riscv_vslideup_vx_i8m2(dest, src, offset, vl); } // 
CHECK-RV64-LABEL: @test_vslideup_vx_i8m4( @@ -193,7 +193,7 @@ vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m4(dest, src, offset, vl); + return __riscv_vslideup_vx_i8m4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8( @@ -202,7 +202,7 @@ vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m8(dest, src, offset, vl); + return __riscv_vslideup_vx_i8m8(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4( @@ -211,7 +211,7 @@ vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_i16mf4(dest, src, offset, vl); + return __riscv_vslideup_vx_i16mf4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2( @@ -220,7 +220,7 @@ vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t of // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i16mf2(dest, src, offset, vl); + return __riscv_vslideup_vx_i16mf2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1( @@ -229,7 +229,7 @@ vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t of // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m1(dest, src, offset, vl); + return __riscv_vslideup_vx_i16m1(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2( @@ 
-238,7 +238,7 @@ vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m2(dest, src, offset, vl); + return __riscv_vslideup_vx_i16m2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4( @@ -247,7 +247,7 @@ vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m4(dest, src, offset, vl); + return __riscv_vslideup_vx_i16m4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8( @@ -256,7 +256,7 @@ vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m8(dest, src, offset, vl); + return __riscv_vslideup_vx_i16m8(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2( @@ -265,7 +265,7 @@ vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i32mf2(dest, src, offset, vl); + return __riscv_vslideup_vx_i32mf2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1( @@ -274,7 +274,7 @@ vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t of // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m1(dest, src, offset, vl); + return __riscv_vslideup_vx_i32m1(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2( @@ -283,7 +283,7 @@ vint32m1_t 
test_vslideup_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m2(dest, src, offset, vl); + return __riscv_vslideup_vx_i32m2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4( @@ -292,7 +292,7 @@ vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m4(dest, src, offset, vl); + return __riscv_vslideup_vx_i32m4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8( @@ -301,7 +301,7 @@ vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m8(dest, src, offset, vl); + return __riscv_vslideup_vx_i32m8(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1( @@ -310,7 +310,7 @@ vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m1(dest, src, offset, vl); + return __riscv_vslideup_vx_i64m1(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2( @@ -319,7 +319,7 @@ vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m2(dest, src, offset, vl); + return __riscv_vslideup_vx_i64m2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4( @@ -328,7 +328,7 @@ vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dest, 
vint64m2_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m4(dest, src, offset, vl); + return __riscv_vslideup_vx_i64m4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8( @@ -337,7 +337,7 @@ vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m8(dest, src, offset, vl); + return __riscv_vslideup_vx_i64m8(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8( @@ -346,7 +346,7 @@ vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf8(dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf8(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4( @@ -355,7 +355,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf4(dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2( @@ -364,7 +364,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf2(dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1( @@ -373,7 +373,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t off // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m1(dest, src, offset, vl); + return __riscv_vslideup_vx_u8m1(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2( @@ -382,7 +382,7 @@ vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m2(dest, src, offset, vl); + return __riscv_vslideup_vx_u8m2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4( @@ -391,7 +391,7 @@ vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m4(dest, src, offset, vl); + return __riscv_vslideup_vx_u8m4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8( @@ -400,7 +400,7 @@ vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m8(dest, src, offset, vl); + return __riscv_vslideup_vx_u8m8(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4( @@ -409,7 +409,7 @@ vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_u16mf4(dest, src, offset, vl); + return __riscv_vslideup_vx_u16mf4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2( @@ -418,7 +418,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vslideup_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u16mf2(dest, src, offset, vl); + return __riscv_vslideup_vx_u16mf2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1( @@ -427,7 +427,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m1(dest, src, offset, vl); + return __riscv_vslideup_vx_u16m1(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2( @@ -436,7 +436,7 @@ vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m2(dest, src, offset, vl); + return __riscv_vslideup_vx_u16m2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4( @@ -445,7 +445,7 @@ vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m4(dest, src, offset, vl); + return __riscv_vslideup_vx_u16m4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8( @@ -454,7 +454,7 @@ vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m8(dest, src, offset, vl); + return __riscv_vslideup_vx_u16m8(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2( @@ -463,7 +463,7 @@ vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vslideup_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u32mf2(dest, src, offset, vl); + return __riscv_vslideup_vx_u32mf2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1( @@ -472,7 +472,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m1(dest, src, offset, vl); + return __riscv_vslideup_vx_u32m1(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2( @@ -481,7 +481,7 @@ vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m2(dest, src, offset, vl); + return __riscv_vslideup_vx_u32m2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4( @@ -490,7 +490,7 @@ vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m4(dest, src, offset, vl); + return __riscv_vslideup_vx_u32m4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8( @@ -499,7 +499,7 @@ vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m8(dest, src, offset, vl); + return __riscv_vslideup_vx_u32m8(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1( @@ -508,7 +508,7 @@ vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vslideup_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m1(dest, src, offset, vl); + return __riscv_vslideup_vx_u64m1(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2( @@ -517,7 +517,7 @@ vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m2(dest, src, offset, vl); + return __riscv_vslideup_vx_u64m2(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4( @@ -526,7 +526,7 @@ vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m4(dest, src, offset, vl); + return __riscv_vslideup_vx_u64m4(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8( @@ -535,7 +535,7 @@ vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m8(dest, src, offset, vl); + return __riscv_vslideup_vx_u64m8(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_m( @@ -544,7 +544,7 @@ vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dest, vuint64m8_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_f16mf4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16mf4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_m( @@ -553,7 +553,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t dest, vflo // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat16mf2_t test_vslideup_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_f16mf2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16mf2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_m( @@ -562,7 +562,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t dest, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m1_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m1_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_m( @@ -571,7 +571,7 @@ vfloat16m1_t test_vslideup_vx_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2_m(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_m( @@ -580,7 +580,7 @@ vfloat16m2_t test_vslideup_vx_f16m2_m(vbool8_t mask, vfloat16m2_t dest, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4_m(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_m( @@ -589,7 +589,7 @@ vfloat16m4_t test_vslideup_vx_f16m4_m(vbool4_t mask, vfloat16m4_t dest, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8_m(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m8_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m8_m(mask, dest, src, offset, vl); } // 
CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m( @@ -598,7 +598,7 @@ vfloat16m8_t test_vslideup_vx_f16m8_m(vbool2_t mask, vfloat16m8_t dest, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_f32mf2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32mf2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m( @@ -607,7 +607,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dest, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m1_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m1_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m( @@ -616,7 +616,7 @@ vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m( @@ -625,7 +625,7 @@ vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dest, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m( @@ -634,7 +634,7 @@ vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dest, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dest, 
vfloat32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m8_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m8_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m( @@ -643,7 +643,7 @@ vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dest, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m1_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m1_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m( @@ -652,7 +652,7 @@ vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m( @@ -661,7 +661,7 @@ vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dest, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m( @@ -670,7 +670,7 @@ vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dest, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m8_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m8_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m( @@ -679,7 +679,7 @@ vfloat64m8_t 
test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dest, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf8_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf8_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m( @@ -688,7 +688,7 @@ vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dest, vint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m( @@ -697,7 +697,7 @@ vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dest, vint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m( @@ -706,7 +706,7 @@ vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dest, vint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m1_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m1_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m( @@ -715,7 +715,7 @@ vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m2_m(mask, dest, src, offset, vl); + return 
__riscv_vslideup_vx_i8m2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m( @@ -724,7 +724,7 @@ vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dest, vint8m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m( @@ -733,7 +733,7 @@ vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dest, vint8m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m8_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m8_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m( @@ -742,7 +742,7 @@ vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dest, vint8m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_i16mf4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16mf4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m( @@ -751,7 +751,7 @@ vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dest, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i16mf2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16mf2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m( @@ -760,7 +760,7 @@ vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dest, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, 
vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m1_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m1_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m( @@ -769,7 +769,7 @@ vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m( @@ -778,7 +778,7 @@ vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dest, vint16m2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m( @@ -787,7 +787,7 @@ vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dest, vint16m4_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m8_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m8_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m( @@ -796,7 +796,7 @@ vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dest, vint16m8_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i32mf2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32mf2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m( @@ -805,7 +805,7 @@ vint32mf2_t 
test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dest, vint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m1_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m1_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m( @@ -814,7 +814,7 @@ vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m( @@ -823,7 +823,7 @@ vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dest, vint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m( @@ -832,7 +832,7 @@ vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dest, vint32m4_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m8_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m8_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m( @@ -841,7 +841,7 @@ vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dest, vint32m8_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m1_m(mask, dest, src, offset, vl); + return 
__riscv_vslideup_vx_i64m1_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m( @@ -850,7 +850,7 @@ vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m( @@ -859,7 +859,7 @@ vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dest, vint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m( @@ -868,7 +868,7 @@ vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dest, vint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m8_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m8_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m( @@ -877,7 +877,7 @@ vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dest, vint64m8_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf8_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf8_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m( @@ -886,7 +886,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dest, vuint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t 
mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m( @@ -895,7 +895,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dest, vuint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m( @@ -904,7 +904,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dest, vuint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m1_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m1_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m( @@ -913,7 +913,7 @@ vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m( @@ -922,7 +922,7 @@ vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dest, vuint8m2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m( @@ -931,7 +931,7 @@ vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t 
mask, vuint8m4_t dest, vuint8m4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m8_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m8_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m( @@ -940,7 +940,7 @@ vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dest, vuint8m8_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_u16mf4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16mf4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m( @@ -949,7 +949,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dest, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u16mf2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16mf2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m( @@ -958,7 +958,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dest, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m1_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m1_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m( @@ -967,7 +967,7 @@ vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m2_m(mask, dest, src, offset, vl); + return 
__riscv_vslideup_vx_u16m2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m( @@ -976,7 +976,7 @@ vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dest, vuint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m( @@ -985,7 +985,7 @@ vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dest, vuint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m8_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m8_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m( @@ -994,7 +994,7 @@ vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dest, vuint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u32mf2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32mf2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m( @@ -1003,7 +1003,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dest, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m1_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m1_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m( @@ -1012,7 +1012,7 @@ vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m( @@ -1021,7 +1021,7 @@ vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dest, vuint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m( @@ -1030,7 +1030,7 @@ vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dest, vuint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m8_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m8_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m( @@ -1039,7 +1039,7 @@ vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dest, vuint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m1_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m1_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m( @@ -1048,7 +1048,7 @@ vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m2_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m2_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslideup_vx_u64m4_m( @@ -1057,7 +1057,7 @@ vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dest, vuint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m4_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m4_m(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m( @@ -1066,6 +1066,6 @@ vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dest, vuint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m8_m(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m8_m(mask, dest, src, offset, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsll.c index 82c790121887..c15b7532007d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsll.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsll.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll_vv_i8mf8(op1, shift, vl); + return __riscv_vsll_vv_i8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf8(op1, shift, vl); + return __riscv_vsll_vx_i8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t 
test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll_vv_i8mf4(op1, shift, vl); + return __riscv_vsll_vv_i8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf4(op1, shift, vl); + return __riscv_vsll_vx_i8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll_vv_i8mf2(op1, shift, vl); + return __riscv_vsll_vv_i8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf2(op1, shift, vl); + return __riscv_vsll_vx_i8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll_vv_i8m1(op1, shift, vl); + return __riscv_vsll_vv_i8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m1(op1, shift, vl); + return __riscv_vsll_vx_i8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll_vv_i8m2(op1, shift, vl); + return __riscv_vsll_vv_i8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m2(op1, shift, vl); + return __riscv_vsll_vx_i8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll_vv_i8m4(op1, shift, vl); + return __riscv_vsll_vv_i8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m4(op1, shift, vl); + return __riscv_vsll_vx_i8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_i8m8(op1, shift, vl); + return __riscv_vsll_vv_i8m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m8(op1, shift, vl); + return __riscv_vsll_vx_i8m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_i16mf4(op1, shift, vl); + return __riscv_vsll_vv_i16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf4(op1, shift, vl); + return __riscv_vsll_vx_i16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_i16mf2(op1, shift, vl); + return __riscv_vsll_vv_i16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf2(op1, shift, vl); + return __riscv_vsll_vx_i16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll_vv_i16m1(op1, shift, vl); + return __riscv_vsll_vv_i16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m1(op1, shift, vl); + return __riscv_vsll_vx_i16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, 
size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_i16m2(op1, shift, vl); + return __riscv_vsll_vv_i16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m2(op1, shift, vl); + return __riscv_vsll_vx_i16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_i16m4(op1, shift, vl); + return __riscv_vsll_vv_i16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m4(op1, shift, vl); + return __riscv_vsll_vx_i16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_i16m8(op1, shift, vl); + return __riscv_vsll_vv_i16m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m8(op1, shift, vl); + return __riscv_vsll_vx_i16m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t 
test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_i32mf2(op1, shift, vl); + return __riscv_vsll_vv_i32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32mf2(op1, shift, vl); + return __riscv_vsll_vx_i32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll_vv_i32m1(op1, shift, vl); + return __riscv_vsll_vv_i32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m1(op1, shift, vl); + return __riscv_vsll_vx_i32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_i32m2(op1, shift, vl); + return __riscv_vsll_vv_i32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m2(op1, shift, vl); + return __riscv_vsll_vx_i32m2(op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsll_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_i32m4(op1, shift, vl); + return __riscv_vsll_vv_i32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m4(op1, shift, vl); + return __riscv_vsll_vx_i32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_i32m8(op1, shift, vl); + return __riscv_vsll_vv_i32m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m8(op1, shift, vl); + return __riscv_vsll_vx_i32m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_i64m1(op1, shift, vl); + return __riscv_vsll_vv_i64m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m1(op1, shift, vl); + return __riscv_vsll_vx_i64m1(op1, 
shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll_vv_i64m2(op1, shift, vl); + return __riscv_vsll_vv_i64m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m2(op1, shift, vl); + return __riscv_vsll_vx_i64m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll_vv_i64m4(op1, shift, vl); + return __riscv_vsll_vv_i64m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m4(op1, shift, vl); + return __riscv_vsll_vx_i64m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_i64m8(op1, shift, vl); + return __riscv_vsll_vv_i64m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m8(op1, shift, vl); + 
return __riscv_vsll_vx_i64m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8( @@ -408,7 +408,7 @@ vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll_vv_u8mf8(op1, shift, vl); + return __riscv_vsll_vv_u8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8( @@ -417,7 +417,7 @@ vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf8(op1, shift, vl); + return __riscv_vsll_vx_u8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4( @@ -426,7 +426,7 @@ vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll_vv_u8mf4(op1, shift, vl); + return __riscv_vsll_vv_u8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4( @@ -435,7 +435,7 @@ vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf4(op1, shift, vl); + return __riscv_vsll_vx_u8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2( @@ -444,7 +444,7 @@ vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll_vv_u8mf2(op1, shift, vl); + return __riscv_vsll_vv_u8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2( @@ -453,7 +453,7 @@ vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t 
shift, size_t vl) { - return vsll_vx_u8mf2(op1, shift, vl); + return __riscv_vsll_vx_u8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m1( @@ -462,7 +462,7 @@ vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll_vv_u8m1(op1, shift, vl); + return __riscv_vsll_vv_u8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m1( @@ -471,7 +471,7 @@ vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m1(op1, shift, vl); + return __riscv_vsll_vx_u8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m2( @@ -480,7 +480,7 @@ vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll_vv_u8m2(op1, shift, vl); + return __riscv_vsll_vv_u8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m2( @@ -489,7 +489,7 @@ vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m2(op1, shift, vl); + return __riscv_vsll_vx_u8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m4( @@ -498,7 +498,7 @@ vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll_vv_u8m4(op1, shift, vl); + return __riscv_vsll_vv_u8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m4( @@ -507,7 +507,7 @@ vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, 
size_t shift, size_t vl) { - return vsll_vx_u8m4(op1, shift, vl); + return __riscv_vsll_vx_u8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m8( @@ -516,7 +516,7 @@ vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_u8m8(op1, shift, vl); + return __riscv_vsll_vv_u8m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m8( @@ -525,7 +525,7 @@ vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m8(op1, shift, vl); + return __riscv_vsll_vx_u8m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4( @@ -534,7 +534,7 @@ vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_u16mf4(op1, shift, vl); + return __riscv_vsll_vv_u16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4( @@ -543,7 +543,7 @@ vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf4(op1, shift, vl); + return __riscv_vsll_vx_u16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2( @@ -552,7 +552,7 @@ vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_u16mf2(op1, shift, vl); + return __riscv_vsll_vv_u16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2( @@ -561,7 +561,7 @@ vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf2(op1, shift, vl); + return __riscv_vsll_vx_u16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m1( @@ -570,7 +570,7 @@ vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll_vv_u16m1(op1, shift, vl); + return __riscv_vsll_vv_u16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m1( @@ -579,7 +579,7 @@ vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m1(op1, shift, vl); + return __riscv_vsll_vx_u16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m2( @@ -588,7 +588,7 @@ vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_u16m2(op1, shift, vl); + return __riscv_vsll_vv_u16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m2( @@ -597,7 +597,7 @@ vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m2(op1, shift, vl); + return __riscv_vsll_vx_u16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m4( @@ -606,7 +606,7 @@ vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_u16m4(op1, shift, vl); + return __riscv_vsll_vv_u16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m4( @@ -615,7 +615,7 @@ vuint16m4_t 
test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m4(op1, shift, vl); + return __riscv_vsll_vx_u16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m8( @@ -624,7 +624,7 @@ vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_u16m8(op1, shift, vl); + return __riscv_vsll_vv_u16m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m8( @@ -633,7 +633,7 @@ vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m8(op1, shift, vl); + return __riscv_vsll_vx_u16m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2( @@ -642,7 +642,7 @@ vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_u32mf2(op1, shift, vl); + return __riscv_vsll_vv_u32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2( @@ -651,7 +651,7 @@ vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32mf2(op1, shift, vl); + return __riscv_vsll_vx_u32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m1( @@ -660,7 +660,7 @@ vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll_vv_u32m1(op1, shift, vl); + return __riscv_vsll_vv_u32m1(op1, shift, vl); 
} // CHECK-RV64-LABEL: @test_vsll_vx_u32m1( @@ -669,7 +669,7 @@ vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m1(op1, shift, vl); + return __riscv_vsll_vx_u32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m2( @@ -678,7 +678,7 @@ vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_u32m2(op1, shift, vl); + return __riscv_vsll_vv_u32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m2( @@ -687,7 +687,7 @@ vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m2(op1, shift, vl); + return __riscv_vsll_vx_u32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m4( @@ -696,7 +696,7 @@ vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_u32m4(op1, shift, vl); + return __riscv_vsll_vv_u32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m4( @@ -705,7 +705,7 @@ vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m4(op1, shift, vl); + return __riscv_vsll_vx_u32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m8( @@ -714,7 +714,7 @@ vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_u32m8(op1, 
shift, vl); + return __riscv_vsll_vv_u32m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m8( @@ -723,7 +723,7 @@ vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m8(op1, shift, vl); + return __riscv_vsll_vx_u32m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m1( @@ -732,7 +732,7 @@ vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_u64m1(op1, shift, vl); + return __riscv_vsll_vv_u64m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m1( @@ -741,7 +741,7 @@ vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m1(op1, shift, vl); + return __riscv_vsll_vx_u64m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m2( @@ -750,7 +750,7 @@ vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll_vv_u64m2(op1, shift, vl); + return __riscv_vsll_vv_u64m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m2( @@ -759,7 +759,7 @@ vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m2(op1, shift, vl); + return __riscv_vsll_vx_u64m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m4( @@ -768,7 +768,7 @@ vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, 
vuint64m4_t shift, size_t vl) { - return vsll_vv_u64m4(op1, shift, vl); + return __riscv_vsll_vv_u64m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m4( @@ -777,7 +777,7 @@ vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m4(op1, shift, vl); + return __riscv_vsll_vx_u64m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m8( @@ -786,7 +786,7 @@ vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_u64m8(op1, shift, vl); + return __riscv_vsll_vv_u64m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m8( @@ -795,7 +795,7 @@ vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m8(op1, shift, vl); + return __riscv_vsll_vx_u64m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m( @@ -804,7 +804,7 @@ vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll_vv_i8mf8_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m( @@ -813,7 +813,7 @@ vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf8_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m( @@ -822,7 +822,7 @@ vint8mf8_t 
test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll_vv_i8mf4_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m( @@ -831,7 +831,7 @@ vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf4_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m( @@ -840,7 +840,7 @@ vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll_vv_i8mf2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m( @@ -849,7 +849,7 @@ vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m( @@ -858,7 +858,7 @@ vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll_vv_i8m1_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m( @@ -867,7 +867,7 @@ vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m1_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m( @@ -876,7 +876,7 @@ vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll_vv_i8m2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m( @@ -885,7 +885,7 @@ vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m( @@ -894,7 +894,7 @@ vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll_vv_i8m4_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m( @@ -903,7 +903,7 @@ vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m4_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m( @@ -912,7 +912,7 @@ vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_i8m8_m(mask, op1, shift, 
vl); + return __riscv_vsll_vv_i8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m( @@ -921,7 +921,7 @@ vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m8_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m( @@ -930,7 +930,7 @@ vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_i16mf4_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m( @@ -939,7 +939,7 @@ vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf4_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m( @@ -948,7 +948,7 @@ vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_i16mf2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m( @@ -957,7 +957,7 @@ vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsll_vv_i16m1_m( @@ -966,7 +966,7 @@ vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll_vv_i16m1_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m( @@ -975,7 +975,7 @@ vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m1_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m( @@ -984,7 +984,7 @@ vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_i16m2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m( @@ -993,7 +993,7 @@ vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m( @@ -1002,7 +1002,7 @@ vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_i16m4_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m( @@ -1011,7 +1011,7 @@ vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, 
vint16m4_t op1, vuint16m4_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m4_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m( @@ -1020,7 +1020,7 @@ vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_i16m8_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m( @@ -1029,7 +1029,7 @@ vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m8_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m( @@ -1038,7 +1038,7 @@ vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_i32mf2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32mf2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll_vv_i32m1_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m( @@ -1065,7 +1065,7 @@ vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m1_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m( @@ -1074,7 +1074,7 @@ vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_i32m2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m( @@ -1083,7 +1083,7 @@ vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m( @@ -1092,7 +1092,7 @@ vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_i32m4_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m( @@ -1101,7 +1101,7 @@ vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { - 
return vsll_vx_i32m4_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m( @@ -1110,7 +1110,7 @@ vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_i32m8_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m( @@ -1119,7 +1119,7 @@ vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m8_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m( @@ -1128,7 +1128,7 @@ vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_i64m1_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m( @@ -1137,7 +1137,7 @@ vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m1_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m( @@ -1146,7 +1146,7 @@ vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll_vv_i64m2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i64m2_m(mask, 
op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m( @@ -1155,7 +1155,7 @@ vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m( @@ -1164,7 +1164,7 @@ vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll_vv_i64m4_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m( @@ -1173,7 +1173,7 @@ vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m4_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m( @@ -1182,7 +1182,7 @@ vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_i64m8_m(mask, op1, shift, vl); + return __riscv_vsll_vv_i64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m( @@ -1191,7 +1191,7 @@ vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m8_m(mask, op1, shift, vl); + return __riscv_vsll_vx_i64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m( @@ -1200,7 +1200,7 @@ 
vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll_vv_u8mf8_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf8_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll_vv_u8mf4_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf4_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll_vv_u8mf2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, 
vuint8mf2_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll_vv_u8m1_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m1_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll_vv_u8m2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, 
vuint8m4_t shift, size_t vl) { - return vsll_vv_u8m4_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m4_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_u8m8_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m8_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_u16mf4_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf4_m(mask, op1, shift, vl); + return 
__riscv_vsll_vx_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_u16mf2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll_vv_u16m1_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m1_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_u16m2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u16m2_m(mask, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_u16m4_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m4_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_u16m8_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m8_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m( @@ -1434,7 +1434,7 @@ vuint16m8_t 
test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_u32mf2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32mf2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll_vv_u32m1_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m1_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_u32m2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, 
vuint32m2_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_u32m4_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m4_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_u32m8_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m8_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_u64m1_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m1_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll_vv_u64m2_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m2_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll_vv_u64m4_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t 
vl) { - return vsll_vx_u64m4_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_u64m8_m(mask, op1, shift, vl); + return __riscv_vsll_vv_u64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m8_m(mask, op1, shift, vl); + return __riscv_vsll_vx_u64m8_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm.c index ba1627dc716e..a18b4a96b135 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsm_v_b1(uint8_t *base, vbool1_t value, size_t vl) { - return vsm_v_b1(base, value, vl); + return __riscv_vsm_v_b1(base, value, vl); } // CHECK-RV64-LABEL: @test_vsm_v_b2( @@ -21,7 +21,7 @@ void test_vsm_v_b1(uint8_t *base, vbool1_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vsm_v_b2(uint8_t *base, vbool2_t value, size_t vl) { - return vsm_v_b2(base, value, vl); + return __riscv_vsm_v_b2(base, value, vl); } // CHECK-RV64-LABEL: @test_vsm_v_b4( @@ -30,7 +30,7 @@ void test_vsm_v_b2(uint8_t *base, vbool2_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vsm_v_b4(uint8_t *base, vbool4_t value, size_t vl) { - 
return vsm_v_b4(base, value, vl); + return __riscv_vsm_v_b4(base, value, vl); } // CHECK-RV64-LABEL: @test_vsm_v_b8( @@ -39,7 +39,7 @@ void test_vsm_v_b4(uint8_t *base, vbool4_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vsm_v_b8(uint8_t *base, vbool8_t value, size_t vl) { - return vsm_v_b8(base, value, vl); + return __riscv_vsm_v_b8(base, value, vl); } // CHECK-RV64-LABEL: @test_vsm_v_b16( @@ -48,7 +48,7 @@ void test_vsm_v_b8(uint8_t *base, vbool8_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vsm_v_b16(uint8_t *base, vbool16_t value, size_t vl) { - return vsm_v_b16(base, value, vl); + return __riscv_vsm_v_b16(base, value, vl); } // CHECK-RV64-LABEL: @test_vsm_v_b32( @@ -57,7 +57,7 @@ void test_vsm_v_b16(uint8_t *base, vbool16_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vsm_v_b32(uint8_t *base, vbool32_t value, size_t vl) { - return vsm_v_b32(base, value, vl); + return __riscv_vsm_v_b32(base, value, vl); } // CHECK-RV64-LABEL: @test_vsm_v_b64( @@ -66,6 +66,6 @@ void test_vsm_v_b32(uint8_t *base, vbool32_t value, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vsm_v_b64(uint8_t *base, vbool64_t value, size_t vl) { - return vsm_v_b64(base, value, vl); + return __riscv_vsm_v_b64(base, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsmul.c index 22b9bfb5166c..7285134c6723 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsmul.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsmul_vv_i8mf8(op1, op2, vl); + return __riscv_vsmul_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t 
test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf8(op1, op2, vl); + return __riscv_vsmul_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsmul_vv_i8mf4(op1, op2, vl); + return __riscv_vsmul_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf4(op1, op2, vl); + return __riscv_vsmul_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsmul_vv_i8mf2(op1, op2, vl); + return __riscv_vsmul_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf2(op1, op2, vl); + return __riscv_vsmul_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsmul_vv_i8m1(op1, op2, vl); + return __riscv_vsmul_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t 
test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m1(op1, op2, vl); + return __riscv_vsmul_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsmul_vv_i8m2(op1, op2, vl); + return __riscv_vsmul_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m2(op1, op2, vl); + return __riscv_vsmul_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsmul_vv_i8m4(op1, op2, vl); + return __riscv_vsmul_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m4(op1, op2, vl); + return __riscv_vsmul_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsmul_vv_i8m8(op1, op2, vl); + return __riscv_vsmul_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m8(op1, op2, vl); + return __riscv_vsmul_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsmul_vv_i16mf4(op1, op2, vl); + return __riscv_vsmul_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf4(op1, op2, vl); + return __riscv_vsmul_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsmul_vv_i16mf2(op1, op2, vl); + return __riscv_vsmul_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf2(op1, op2, vl); + return __riscv_vsmul_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsmul_vv_i16m1(op1, op2, vl); + return __riscv_vsmul_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t 
test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m1(op1, op2, vl); + return __riscv_vsmul_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsmul_vv_i16m2(op1, op2, vl); + return __riscv_vsmul_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m2(op1, op2, vl); + return __riscv_vsmul_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsmul_vv_i16m4(op1, op2, vl); + return __riscv_vsmul_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m4(op1, op2, vl); + return __riscv_vsmul_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsmul_vv_i16m8(op1, op2, vl); + return __riscv_vsmul_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8( @@ -237,7 
+237,7 @@ vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m8(op1, op2, vl); + return __riscv_vsmul_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsmul_vv_i32mf2(op1, op2, vl); + return __riscv_vsmul_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32mf2(op1, op2, vl); + return __riscv_vsmul_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsmul_vv_i32m1(op1, op2, vl); + return __riscv_vsmul_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m1(op1, op2, vl); + return __riscv_vsmul_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsmul_vv_i32m2(op1, op2, vl); + return __riscv_vsmul_vv_i32m2(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsmul_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m2(op1, op2, vl); + return __riscv_vsmul_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsmul_vv_i32m4(op1, op2, vl); + return __riscv_vsmul_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m4(op1, op2, vl); + return __riscv_vsmul_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsmul_vv_i32m8(op1, op2, vl); + return __riscv_vsmul_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m8(op1, op2, vl); + return __riscv_vsmul_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsmul_vv_i64m1(op1, op2, vl); + return 
__riscv_vsmul_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m1(op1, op2, vl); + return __riscv_vsmul_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul_vv_i64m2(op1, op2, vl); + return __riscv_vsmul_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m2(op1, op2, vl); + return __riscv_vsmul_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul_vv_i64m4(op1, op2, vl); + return __riscv_vsmul_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m4(op1, op2, vl); + return __riscv_vsmul_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsmul_vv_i64m8(op1, 
op2, vl); + return __riscv_vsmul_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m8(op1, op2, vl); + return __riscv_vsmul_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m( @@ -408,7 +408,7 @@ vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsmul_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m( @@ -426,7 +426,7 @@ vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsmul_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m( @@ -435,7 +435,7 @@ vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m( @@ -444,7 +444,7 @@ vint8mf4_t 
test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsmul_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m( @@ -453,7 +453,7 @@ vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m( @@ -462,7 +462,7 @@ vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsmul_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m( @@ -480,7 +480,7 @@ vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsmul_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m( @@ -489,7 +489,7 @@ vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m( @@ -498,7 +498,7 @@ vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsmul_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m( @@ -507,7 +507,7 @@ vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m( @@ -516,7 +516,7 @@ vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsmul_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m( @@ -525,7 +525,7 @@ vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m( @@ -534,7 +534,7 @@ vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsmul_vv_i16mf4_m(mask, op1, op2, vl); + return 
__riscv_vsmul_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m( @@ -543,7 +543,7 @@ vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m( @@ -552,7 +552,7 @@ vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsmul_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m( @@ -561,7 +561,7 @@ vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m( @@ -570,7 +570,7 @@ vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsmul_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m( @@ -579,7 +579,7 @@ vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsmul_vv_i16m2_m( @@ -588,7 +588,7 @@ vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsmul_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m( @@ -597,7 +597,7 @@ vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m( @@ -606,7 +606,7 @@ vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsmul_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m( @@ -615,7 +615,7 @@ vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m( @@ -624,7 +624,7 @@ vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsmul_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m( @@ -633,7 +633,7 @@ vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, 
vint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m( @@ -642,7 +642,7 @@ vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsmul_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m( @@ -651,7 +651,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m( @@ -660,7 +660,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsmul_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m( @@ -669,7 +669,7 @@ vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m( @@ -678,7 +678,7 @@ vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t 
mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsmul_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m( @@ -687,7 +687,7 @@ vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m( @@ -696,7 +696,7 @@ vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsmul_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m( @@ -705,7 +705,7 @@ vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m( @@ -714,7 +714,7 @@ vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsmul_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m( @@ -723,7 +723,7 @@ vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m8_m(mask, op1, op2, vl); + return 
__riscv_vsmul_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( @@ -732,7 +732,7 @@ vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsmul_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( @@ -741,7 +741,7 @@ vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( @@ -750,7 +750,7 @@ vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( @@ -759,7 +759,7 @@ vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( @@ -768,7 +768,7 @@ vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( @@ -777,7 
+777,7 @@ vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( @@ -786,7 +786,7 @@ vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsmul_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m( @@ -795,6 +795,6 @@ vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei16.c index 2117e2385067..455ca3401c13 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei16_v_f16mf4(base, bindex, value, vl); + return __riscv_vsoxei16_v_f16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f16mf2(_Float16 *base, 
vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei16_v_f16mf2(base, bindex, value, vl); + return __riscv_vsoxei16_v_f16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei16_v_f16m1(base, bindex, value, vl); + return __riscv_vsoxei16_v_f16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei16_v_f16m2(base, bindex, value, vl); + return __riscv_vsoxei16_v_f16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4( @@ -49,7 +49,7 @@ void test_vsoxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { - return vsoxei16_v_f16m4(base, bindex, value, vl); + return __riscv_vsoxei16_v_f16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8( @@ -58,7 +58,7 @@ void test_vsoxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { - return vsoxei16_v_f16m8(base, bindex, value, vl); + return __riscv_vsoxei16_v_f16m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2( @@ -67,7 +67,7 @@ void test_vsoxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { - return 
vsoxei16_v_f32mf2(base, bindex, value, vl); + return __riscv_vsoxei16_v_f32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1( @@ -76,7 +76,7 @@ void test_vsoxei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { - return vsoxei16_v_f32m1(base, bindex, value, vl); + return __riscv_vsoxei16_v_f32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2( @@ -85,7 +85,7 @@ void test_vsoxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { - return vsoxei16_v_f32m2(base, bindex, value, vl); + return __riscv_vsoxei16_v_f32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4( @@ -94,7 +94,7 @@ void test_vsoxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { - return vsoxei16_v_f32m4(base, bindex, value, vl); + return __riscv_vsoxei16_v_f32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8( @@ -103,7 +103,7 @@ void test_vsoxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { - return vsoxei16_v_f32m8(base, bindex, value, vl); + return __riscv_vsoxei16_v_f32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1( @@ -112,7 +112,7 @@ void test_vsoxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { - return vsoxei16_v_f64m1(base, bindex, value, vl); + return 
__riscv_vsoxei16_v_f64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2( @@ -121,7 +121,7 @@ void test_vsoxei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { - return vsoxei16_v_f64m2(base, bindex, value, vl); + return __riscv_vsoxei16_v_f64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4( @@ -130,7 +130,7 @@ void test_vsoxei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { - return vsoxei16_v_f64m4(base, bindex, value, vl); + return __riscv_vsoxei16_v_f64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8( @@ -139,7 +139,7 @@ void test_vsoxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { - return vsoxei16_v_f64m8(base, bindex, value, vl); + return __riscv_vsoxei16_v_f64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8( @@ -148,7 +148,7 @@ void test_vsoxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { - return vsoxei16_v_i8mf8(base, bindex, value, vl); + return __riscv_vsoxei16_v_i8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4( @@ -157,7 +157,7 @@ void test_vsoxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { - return vsoxei16_v_i8mf4(base, bindex, value, vl); + return __riscv_vsoxei16_v_i8mf4(base, bindex, value, vl); } // 
CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2( @@ -166,7 +166,7 @@ void test_vsoxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { - return vsoxei16_v_i8mf2(base, bindex, value, vl); + return __riscv_vsoxei16_v_i8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1( @@ -175,7 +175,7 @@ void test_vsoxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { - return vsoxei16_v_i8m1(base, bindex, value, vl); + return __riscv_vsoxei16_v_i8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2( @@ -184,7 +184,7 @@ void test_vsoxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { - return vsoxei16_v_i8m2(base, bindex, value, vl); + return __riscv_vsoxei16_v_i8m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4( @@ -193,7 +193,7 @@ void test_vsoxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { - return vsoxei16_v_i8m4(base, bindex, value, vl); + return __riscv_vsoxei16_v_i8m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4( @@ -202,7 +202,7 @@ void test_vsoxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { - return vsoxei16_v_i16mf4(base, bindex, value, vl); + return __riscv_vsoxei16_v_i16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2( @@ -211,7 +211,7 @@ void 
test_vsoxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { - return vsoxei16_v_i16mf2(base, bindex, value, vl); + return __riscv_vsoxei16_v_i16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1( @@ -220,7 +220,7 @@ void test_vsoxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { - return vsoxei16_v_i16m1(base, bindex, value, vl); + return __riscv_vsoxei16_v_i16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2( @@ -229,7 +229,7 @@ void test_vsoxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { - return vsoxei16_v_i16m2(base, bindex, value, vl); + return __riscv_vsoxei16_v_i16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4( @@ -238,7 +238,7 @@ void test_vsoxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { - return vsoxei16_v_i16m4(base, bindex, value, vl); + return __riscv_vsoxei16_v_i16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8( @@ -247,7 +247,7 @@ void test_vsoxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { - return vsoxei16_v_i16m8(base, bindex, value, vl); + return __riscv_vsoxei16_v_i16m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2( @@ -256,7 +256,7 @@ void test_vsoxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t 
value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { - return vsoxei16_v_i32mf2(base, bindex, value, vl); + return __riscv_vsoxei16_v_i32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1( @@ -265,7 +265,7 @@ void test_vsoxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { - return vsoxei16_v_i32m1(base, bindex, value, vl); + return __riscv_vsoxei16_v_i32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2( @@ -274,7 +274,7 @@ void test_vsoxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { - return vsoxei16_v_i32m2(base, bindex, value, vl); + return __riscv_vsoxei16_v_i32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4( @@ -283,7 +283,7 @@ void test_vsoxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { - return vsoxei16_v_i32m4(base, bindex, value, vl); + return __riscv_vsoxei16_v_i32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8( @@ -292,7 +292,7 @@ void test_vsoxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { - return vsoxei16_v_i32m8(base, bindex, value, vl); + return __riscv_vsoxei16_v_i32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1( @@ -301,7 +301,7 @@ void test_vsoxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, // CHECK-RV64-NEXT: ret void // void 
test_vsoxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { - return vsoxei16_v_i64m1(base, bindex, value, vl); + return __riscv_vsoxei16_v_i64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2( @@ -310,7 +310,7 @@ void test_vsoxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { - return vsoxei16_v_i64m2(base, bindex, value, vl); + return __riscv_vsoxei16_v_i64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4( @@ -319,7 +319,7 @@ void test_vsoxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { - return vsoxei16_v_i64m4(base, bindex, value, vl); + return __riscv_vsoxei16_v_i64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8( @@ -328,7 +328,7 @@ void test_vsoxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { - return vsoxei16_v_i64m8(base, bindex, value, vl); + return __riscv_vsoxei16_v_i64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8( @@ -337,7 +337,7 @@ void test_vsoxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { - return vsoxei16_v_u8mf8(base, bindex, value, vl); + return __riscv_vsoxei16_v_u8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4( @@ -346,7 +346,7 @@ void test_vsoxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t 
value, size_t vl) { - return vsoxei16_v_u8mf4(base, bindex, value, vl); + return __riscv_vsoxei16_v_u8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2( @@ -355,7 +355,7 @@ void test_vsoxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { - return vsoxei16_v_u8mf2(base, bindex, value, vl); + return __riscv_vsoxei16_v_u8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1( @@ -364,7 +364,7 @@ void test_vsoxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { - return vsoxei16_v_u8m1(base, bindex, value, vl); + return __riscv_vsoxei16_v_u8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2( @@ -373,7 +373,7 @@ void test_vsoxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { - return vsoxei16_v_u8m2(base, bindex, value, vl); + return __riscv_vsoxei16_v_u8m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4( @@ -382,7 +382,7 @@ void test_vsoxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { - return vsoxei16_v_u8m4(base, bindex, value, vl); + return __riscv_vsoxei16_v_u8m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4( @@ -391,7 +391,7 @@ void test_vsoxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { - return vsoxei16_v_u16mf4(base, bindex, value, vl); + 
return __riscv_vsoxei16_v_u16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2( @@ -400,7 +400,7 @@ void test_vsoxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t va // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { - return vsoxei16_v_u16mf2(base, bindex, value, vl); + return __riscv_vsoxei16_v_u16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1( @@ -409,7 +409,7 @@ void test_vsoxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t va // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { - return vsoxei16_v_u16m1(base, bindex, value, vl); + return __riscv_vsoxei16_v_u16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2( @@ -418,7 +418,7 @@ void test_vsoxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { - return vsoxei16_v_u16m2(base, bindex, value, vl); + return __riscv_vsoxei16_v_u16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4( @@ -427,7 +427,7 @@ void test_vsoxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { - return vsoxei16_v_u16m4(base, bindex, value, vl); + return __riscv_vsoxei16_v_u16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8( @@ -436,7 +436,7 @@ void test_vsoxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { - return vsoxei16_v_u16m8(base, bindex, value, vl); + return __riscv_vsoxei16_v_u16m8(base, bindex, value, vl); 
} // CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2( @@ -445,7 +445,7 @@ void test_vsoxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { - return vsoxei16_v_u32mf2(base, bindex, value, vl); + return __riscv_vsoxei16_v_u32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1( @@ -454,7 +454,7 @@ void test_vsoxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t va // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { - return vsoxei16_v_u32m1(base, bindex, value, vl); + return __riscv_vsoxei16_v_u32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2( @@ -463,7 +463,7 @@ void test_vsoxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { - return vsoxei16_v_u32m2(base, bindex, value, vl); + return __riscv_vsoxei16_v_u32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4( @@ -472,7 +472,7 @@ void test_vsoxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { - return vsoxei16_v_u32m4(base, bindex, value, vl); + return __riscv_vsoxei16_v_u32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8( @@ -481,7 +481,7 @@ void test_vsoxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { - return vsoxei16_v_u32m8(base, bindex, value, vl); + return __riscv_vsoxei16_v_u32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1( @@ -490,7 
+490,7 @@ void test_vsoxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { - return vsoxei16_v_u64m1(base, bindex, value, vl); + return __riscv_vsoxei16_v_u64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2( @@ -499,7 +499,7 @@ void test_vsoxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { - return vsoxei16_v_u64m2(base, bindex, value, vl); + return __riscv_vsoxei16_v_u64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4( @@ -508,7 +508,7 @@ void test_vsoxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { - return vsoxei16_v_u64m4(base, bindex, value, vl); + return __riscv_vsoxei16_v_u64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8( @@ -517,7 +517,7 @@ void test_vsoxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { - return vsoxei16_v_u64m8(base, bindex, value, vl); + return __riscv_vsoxei16_v_u64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4_m( @@ -526,7 +526,7 @@ void test_vsoxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei16_v_f16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2_m( @@ -535,7 +535,7 @@ void 
test_vsoxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei16_v_f16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1_m( @@ -544,7 +544,7 @@ void test_vsoxei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei16_v_f16m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2_m( @@ -553,7 +553,7 @@ void test_vsoxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei16_v_f16m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4_m( @@ -562,7 +562,7 @@ void test_vsoxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { - return vsoxei16_v_f16m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8_m( @@ -571,7 +571,7 @@ void test_vsoxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { - return vsoxei16_v_f16m8_m(mask, base, bindex, value, vl); + return 
__riscv_vsoxei16_v_f16m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2_m( @@ -580,7 +580,7 @@ void test_vsoxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { - return vsoxei16_v_f32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1_m( @@ -589,7 +589,7 @@ void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { - return vsoxei16_v_f32m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2_m( @@ -598,7 +598,7 @@ void test_vsoxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { - return vsoxei16_v_f32m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4_m( @@ -607,7 +607,7 @@ void test_vsoxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { - return vsoxei16_v_f32m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8_m( @@ -616,7 +616,7 @@ void test_vsoxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t 
bindex, vfloat32m8_t value, size_t vl) { - return vsoxei16_v_f32m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1_m( @@ -625,7 +625,7 @@ void test_vsoxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { - return vsoxei16_v_f64m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2_m( @@ -634,7 +634,7 @@ void test_vsoxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { - return vsoxei16_v_f64m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4_m( @@ -643,7 +643,7 @@ void test_vsoxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { - return vsoxei16_v_f64m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8_m( @@ -652,7 +652,7 @@ void test_vsoxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { - return vsoxei16_v_f64m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_f64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8_m( @@ -661,7 +661,7 @@ void test_vsoxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t 
bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { - return vsoxei16_v_i8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4_m( @@ -670,7 +670,7 @@ void test_vsoxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { - return vsoxei16_v_i8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2_m( @@ -679,7 +679,7 @@ void test_vsoxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { - return vsoxei16_v_i8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1_m( @@ -688,7 +688,7 @@ void test_vsoxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { - return vsoxei16_v_i8m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2_m( @@ -697,7 +697,7 @@ void test_vsoxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { - return vsoxei16_v_i8m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i8m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4_m( @@ 
-706,7 +706,7 @@ void test_vsoxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { - return vsoxei16_v_i8m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i8m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4_m( @@ -715,7 +715,7 @@ void test_vsoxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { - return vsoxei16_v_i16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2_m( @@ -724,7 +724,7 @@ void test_vsoxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { - return vsoxei16_v_i16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1_m( @@ -733,7 +733,7 @@ void test_vsoxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { - return vsoxei16_v_i16m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2_m( @@ -742,7 +742,7 @@ void test_vsoxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { - return vsoxei16_v_i16m2_m(mask, base, bindex, value, vl); + return 
__riscv_vsoxei16_v_i16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4_m( @@ -751,7 +751,7 @@ void test_vsoxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { - return vsoxei16_v_i16m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8_m( @@ -760,7 +760,7 @@ void test_vsoxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { - return vsoxei16_v_i16m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i16m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2_m( @@ -769,7 +769,7 @@ void test_vsoxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { - return vsoxei16_v_i32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1_m( @@ -778,7 +778,7 @@ void test_vsoxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { - return vsoxei16_v_i32m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2_m( @@ -787,7 +787,7 @@ void test_vsoxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i32m2_m(vbool16_t mask, int32_t *base, 
vuint16m1_t bindex, vint32m2_t value, size_t vl) { - return vsoxei16_v_i32m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4_m( @@ -796,7 +796,7 @@ void test_vsoxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { - return vsoxei16_v_i32m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8_m( @@ -805,7 +805,7 @@ void test_vsoxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { - return vsoxei16_v_i32m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1_m( @@ -814,7 +814,7 @@ void test_vsoxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { - return vsoxei16_v_i64m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2_m( @@ -823,7 +823,7 @@ void test_vsoxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { - return vsoxei16_v_i64m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4_m( @@ -832,7 +832,7 @@ void test_vsoxei16_v_i64m2_m(vbool32_t mask, int64_t *base, 
vuint16mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { - return vsoxei16_v_i64m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8_m( @@ -841,7 +841,7 @@ void test_vsoxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { - return vsoxei16_v_i64m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_i64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8_m( @@ -850,7 +850,7 @@ void test_vsoxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { - return vsoxei16_v_u8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4_m( @@ -859,7 +859,7 @@ void test_vsoxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { - return vsoxei16_v_u8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2_m( @@ -868,7 +868,7 @@ void test_vsoxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { - return vsoxei16_v_u8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u8mf2_m(mask, base, bindex, value, vl); } // 
CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1_m( @@ -877,7 +877,7 @@ void test_vsoxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { - return vsoxei16_v_u8m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2_m( @@ -886,7 +886,7 @@ void test_vsoxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { - return vsoxei16_v_u8m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u8m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4_m( @@ -895,7 +895,7 @@ void test_vsoxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { - return vsoxei16_v_u8m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u8m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4_m( @@ -904,7 +904,7 @@ void test_vsoxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { - return vsoxei16_v_u16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2_m( @@ -913,7 +913,7 @@ void test_vsoxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { - return 
vsoxei16_v_u16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1_m( @@ -922,7 +922,7 @@ void test_vsoxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { - return vsoxei16_v_u16m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2_m( @@ -931,7 +931,7 @@ void test_vsoxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { - return vsoxei16_v_u16m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4_m( @@ -940,7 +940,7 @@ void test_vsoxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { - return vsoxei16_v_u16m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8_m( @@ -949,7 +949,7 @@ void test_vsoxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { - return vsoxei16_v_u16m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u16m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2_m( @@ -958,7 +958,7 @@ void test_vsoxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, // CHECK-RV64-NEXT: ret void // void 
test_vsoxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { - return vsoxei16_v_u32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1_m( @@ -967,7 +967,7 @@ void test_vsoxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { - return vsoxei16_v_u32m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2_m( @@ -976,7 +976,7 @@ void test_vsoxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { - return vsoxei16_v_u32m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4_m( @@ -985,7 +985,7 @@ void test_vsoxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { - return vsoxei16_v_u32m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8_m( @@ -994,7 +994,7 @@ void test_vsoxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { - return vsoxei16_v_u32m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1_m( @@ -1003,7 +1003,7 
@@ void test_vsoxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { - return vsoxei16_v_u64m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2_m( @@ -1012,7 +1012,7 @@ void test_vsoxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { - return vsoxei16_v_u64m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4_m( @@ -1021,7 +1021,7 @@ void test_vsoxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { - return vsoxei16_v_u64m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8_m( @@ -1030,6 +1030,6 @@ void test_vsoxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { - return vsoxei16_v_u64m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei16_v_u64m8_m(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei32.c index 6fb0df06240b..33a605334a53 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei32.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei32_v_f16mf4(base, bindex, value, vl); + return __riscv_vsoxei32_v_f16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei32_v_f16mf2(base, bindex, value, vl); + return __riscv_vsoxei32_v_f16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t va // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei32_v_f16m1(base, bindex, value, vl); + return __riscv_vsoxei32_v_f16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei32_v_f16m2(base, bindex, value, vl); + return __riscv_vsoxei32_v_f16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4( @@ -49,7 +49,7 @@ void test_vsoxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { - return vsoxei32_v_f16m4(base, bindex, value, vl); + return __riscv_vsoxei32_v_f16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2( @@ -58,7 +58,7 @@ void 
test_vsoxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { - return vsoxei32_v_f32mf2(base, bindex, value, vl); + return __riscv_vsoxei32_v_f32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1( @@ -67,7 +67,7 @@ void test_vsoxei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { - return vsoxei32_v_f32m1(base, bindex, value, vl); + return __riscv_vsoxei32_v_f32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2( @@ -76,7 +76,7 @@ void test_vsoxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { - return vsoxei32_v_f32m2(base, bindex, value, vl); + return __riscv_vsoxei32_v_f32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4( @@ -85,7 +85,7 @@ void test_vsoxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { - return vsoxei32_v_f32m4(base, bindex, value, vl); + return __riscv_vsoxei32_v_f32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8( @@ -94,7 +94,7 @@ void test_vsoxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { - return vsoxei32_v_f32m8(base, bindex, value, vl); + return __riscv_vsoxei32_v_f32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1( @@ -103,7 +103,7 @@ void test_vsoxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, // 
CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { - return vsoxei32_v_f64m1(base, bindex, value, vl); + return __riscv_vsoxei32_v_f64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2( @@ -112,7 +112,7 @@ void test_vsoxei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { - return vsoxei32_v_f64m2(base, bindex, value, vl); + return __riscv_vsoxei32_v_f64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4( @@ -121,7 +121,7 @@ void test_vsoxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { - return vsoxei32_v_f64m4(base, bindex, value, vl); + return __riscv_vsoxei32_v_f64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8( @@ -130,7 +130,7 @@ void test_vsoxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { - return vsoxei32_v_f64m8(base, bindex, value, vl); + return __riscv_vsoxei32_v_f64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8( @@ -139,7 +139,7 @@ void test_vsoxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { - return vsoxei32_v_i8mf8(base, bindex, value, vl); + return __riscv_vsoxei32_v_i8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4( @@ -148,7 +148,7 @@ void test_vsoxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i8mf4(int8_t *base, 
vuint32m1_t bindex, vint8mf4_t value, size_t vl) { - return vsoxei32_v_i8mf4(base, bindex, value, vl); + return __riscv_vsoxei32_v_i8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2( @@ -157,7 +157,7 @@ void test_vsoxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { - return vsoxei32_v_i8mf2(base, bindex, value, vl); + return __riscv_vsoxei32_v_i8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1( @@ -166,7 +166,7 @@ void test_vsoxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { - return vsoxei32_v_i8m1(base, bindex, value, vl); + return __riscv_vsoxei32_v_i8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2( @@ -175,7 +175,7 @@ void test_vsoxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { - return vsoxei32_v_i8m2(base, bindex, value, vl); + return __riscv_vsoxei32_v_i8m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4( @@ -184,7 +184,7 @@ void test_vsoxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { - return vsoxei32_v_i16mf4(base, bindex, value, vl); + return __riscv_vsoxei32_v_i16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2( @@ -193,7 +193,7 @@ void test_vsoxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { - return 
vsoxei32_v_i16mf2(base, bindex, value, vl); + return __riscv_vsoxei32_v_i16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1( @@ -202,7 +202,7 @@ void test_vsoxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { - return vsoxei32_v_i16m1(base, bindex, value, vl); + return __riscv_vsoxei32_v_i16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2( @@ -211,7 +211,7 @@ void test_vsoxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { - return vsoxei32_v_i16m2(base, bindex, value, vl); + return __riscv_vsoxei32_v_i16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4( @@ -220,7 +220,7 @@ void test_vsoxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { - return vsoxei32_v_i16m4(base, bindex, value, vl); + return __riscv_vsoxei32_v_i16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2( @@ -229,7 +229,7 @@ void test_vsoxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { - return vsoxei32_v_i32mf2(base, bindex, value, vl); + return __riscv_vsoxei32_v_i32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1( @@ -238,7 +238,7 @@ void test_vsoxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { - return vsoxei32_v_i32m1(base, bindex, value, vl); + return 
__riscv_vsoxei32_v_i32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2( @@ -247,7 +247,7 @@ void test_vsoxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { - return vsoxei32_v_i32m2(base, bindex, value, vl); + return __riscv_vsoxei32_v_i32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4( @@ -256,7 +256,7 @@ void test_vsoxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { - return vsoxei32_v_i32m4(base, bindex, value, vl); + return __riscv_vsoxei32_v_i32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8( @@ -265,7 +265,7 @@ void test_vsoxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { - return vsoxei32_v_i32m8(base, bindex, value, vl); + return __riscv_vsoxei32_v_i32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1( @@ -274,7 +274,7 @@ void test_vsoxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { - return vsoxei32_v_i64m1(base, bindex, value, vl); + return __riscv_vsoxei32_v_i64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2( @@ -283,7 +283,7 @@ void test_vsoxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { - return vsoxei32_v_i64m2(base, bindex, value, vl); + return __riscv_vsoxei32_v_i64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: 
@test_vsoxei32_v_i64m4( @@ -292,7 +292,7 @@ void test_vsoxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { - return vsoxei32_v_i64m4(base, bindex, value, vl); + return __riscv_vsoxei32_v_i64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8( @@ -301,7 +301,7 @@ void test_vsoxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { - return vsoxei32_v_i64m8(base, bindex, value, vl); + return __riscv_vsoxei32_v_i64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8( @@ -310,7 +310,7 @@ void test_vsoxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { - return vsoxei32_v_u8mf8(base, bindex, value, vl); + return __riscv_vsoxei32_v_u8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4( @@ -319,7 +319,7 @@ void test_vsoxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { - return vsoxei32_v_u8mf4(base, bindex, value, vl); + return __riscv_vsoxei32_v_u8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2( @@ -328,7 +328,7 @@ void test_vsoxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { - return vsoxei32_v_u8mf2(base, bindex, value, vl); + return __riscv_vsoxei32_v_u8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1( @@ -337,7 +337,7 @@ void 
test_vsoxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { - return vsoxei32_v_u8m1(base, bindex, value, vl); + return __riscv_vsoxei32_v_u8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2( @@ -346,7 +346,7 @@ void test_vsoxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { - return vsoxei32_v_u8m2(base, bindex, value, vl); + return __riscv_vsoxei32_v_u8m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4( @@ -355,7 +355,7 @@ void test_vsoxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { - return vsoxei32_v_u16mf4(base, bindex, value, vl); + return __riscv_vsoxei32_v_u16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2( @@ -364,7 +364,7 @@ void test_vsoxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t va // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { - return vsoxei32_v_u16mf2(base, bindex, value, vl); + return __riscv_vsoxei32_v_u16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1( @@ -373,7 +373,7 @@ void test_vsoxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t val // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { - return vsoxei32_v_u16m1(base, bindex, value, vl); + return __riscv_vsoxei32_v_u16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2( @@ -382,7 +382,7 @@ void test_vsoxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, 
vuint16m1_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { - return vsoxei32_v_u16m2(base, bindex, value, vl); + return __riscv_vsoxei32_v_u16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4( @@ -391,7 +391,7 @@ void test_vsoxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { - return vsoxei32_v_u16m4(base, bindex, value, vl); + return __riscv_vsoxei32_v_u16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2( @@ -400,7 +400,7 @@ void test_vsoxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { - return vsoxei32_v_u32mf2(base, bindex, value, vl); + return __riscv_vsoxei32_v_u32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1( @@ -409,7 +409,7 @@ void test_vsoxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t va // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { - return vsoxei32_v_u32m1(base, bindex, value, vl); + return __riscv_vsoxei32_v_u32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2( @@ -418,7 +418,7 @@ void test_vsoxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { - return vsoxei32_v_u32m2(base, bindex, value, vl); + return __riscv_vsoxei32_v_u32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4( @@ -427,7 +427,7 @@ void test_vsoxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value // CHECK-RV64-NEXT: ret void // void 
test_vsoxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { - return vsoxei32_v_u32m4(base, bindex, value, vl); + return __riscv_vsoxei32_v_u32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8( @@ -436,7 +436,7 @@ void test_vsoxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { - return vsoxei32_v_u32m8(base, bindex, value, vl); + return __riscv_vsoxei32_v_u32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1( @@ -445,7 +445,7 @@ void test_vsoxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { - return vsoxei32_v_u64m1(base, bindex, value, vl); + return __riscv_vsoxei32_v_u64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2( @@ -454,7 +454,7 @@ void test_vsoxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { - return vsoxei32_v_u64m2(base, bindex, value, vl); + return __riscv_vsoxei32_v_u64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4( @@ -463,7 +463,7 @@ void test_vsoxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { - return vsoxei32_v_u64m4(base, bindex, value, vl); + return __riscv_vsoxei32_v_u64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8( @@ -472,7 +472,7 @@ void test_vsoxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, 
vuint64m8_t value, size_t vl) { - return vsoxei32_v_u64m8(base, bindex, value, vl); + return __riscv_vsoxei32_v_u64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf4_m( @@ -481,7 +481,7 @@ void test_vsoxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei32_v_f16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_f16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2_m( @@ -490,7 +490,7 @@ void test_vsoxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei32_v_f16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_f16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1_m( @@ -499,7 +499,7 @@ void test_vsoxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei32_v_f16m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_f16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2_m( @@ -508,7 +508,7 @@ void test_vsoxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei32_v_f16m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_f16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4_m( @@ -517,7 +517,7 @@ void test_vsoxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, 
// CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { - return vsoxei32_v_f16m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_f16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2_m( @@ -526,7 +526,7 @@ void test_vsoxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { - return vsoxei32_v_f32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_f32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1_m( @@ -535,7 +535,7 @@ void test_vsoxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { - return vsoxei32_v_f32m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_f32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2_m( @@ -544,7 +544,7 @@ void test_vsoxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { - return vsoxei32_v_f32m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_f32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4_m( @@ -553,7 +553,7 @@ void test_vsoxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { - return vsoxei32_v_f32m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_f32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: 
@test_vsoxei32_v_f32m8_m( @@ -562,7 +562,7 @@ void test_vsoxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { - return vsoxei32_v_f32m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_f32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1_m( @@ -571,7 +571,7 @@ void test_vsoxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { - return vsoxei32_v_f64m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_f64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2_m( @@ -580,7 +580,7 @@ void test_vsoxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { - return vsoxei32_v_f64m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_f64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4_m( @@ -589,7 +589,7 @@ void test_vsoxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { - return vsoxei32_v_f64m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_f64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8_m( @@ -598,7 +598,7 @@ void test_vsoxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { - return vsoxei32_v_f64m8_m(mask, base, 
bindex, value, vl); + return __riscv_vsoxei32_v_f64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8_m( @@ -607,7 +607,7 @@ void test_vsoxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { - return vsoxei32_v_i8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4_m( @@ -616,7 +616,7 @@ void test_vsoxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { - return vsoxei32_v_i8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2_m( @@ -625,7 +625,7 @@ void test_vsoxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { - return vsoxei32_v_i8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1_m( @@ -634,7 +634,7 @@ void test_vsoxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { - return vsoxei32_v_i8m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2_m( @@ -643,7 +643,7 @@ void test_vsoxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i8m2_m(vbool4_t mask, int8_t *base, 
vuint32m8_t bindex, vint8m2_t value, size_t vl) { - return vsoxei32_v_i8m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i8m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4_m( @@ -652,7 +652,7 @@ void test_vsoxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { - return vsoxei32_v_i16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2_m( @@ -661,7 +661,7 @@ void test_vsoxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { - return vsoxei32_v_i16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1_m( @@ -670,7 +670,7 @@ void test_vsoxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { - return vsoxei32_v_i16m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2_m( @@ -679,7 +679,7 @@ void test_vsoxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { - return vsoxei32_v_i16m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4_m( @@ -688,7 +688,7 @@ void test_vsoxei32_v_i16m2_m(vbool8_t mask, int16_t *base, 
vuint32m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { - return vsoxei32_v_i16m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2_m( @@ -697,7 +697,7 @@ void test_vsoxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { - return vsoxei32_v_i32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1_m( @@ -706,7 +706,7 @@ void test_vsoxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { - return vsoxei32_v_i32m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2_m( @@ -715,7 +715,7 @@ void test_vsoxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { - return vsoxei32_v_i32m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4_m( @@ -724,7 +724,7 @@ void test_vsoxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { - return vsoxei32_v_i32m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i32m4_m(mask, base, bindex, value, vl); } // 
CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8_m( @@ -733,7 +733,7 @@ void test_vsoxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { - return vsoxei32_v_i32m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1_m( @@ -742,7 +742,7 @@ void test_vsoxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { - return vsoxei32_v_i64m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2_m( @@ -751,7 +751,7 @@ void test_vsoxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { - return vsoxei32_v_i64m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4_m( @@ -760,7 +760,7 @@ void test_vsoxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { - return vsoxei32_v_i64m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8_m( @@ -769,7 +769,7 @@ void test_vsoxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { - return 
vsoxei32_v_i64m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_i64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8_m( @@ -778,7 +778,7 @@ void test_vsoxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { - return vsoxei32_v_u8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4_m( @@ -787,7 +787,7 @@ void test_vsoxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { - return vsoxei32_v_u8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2_m( @@ -796,7 +796,7 @@ void test_vsoxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { - return vsoxei32_v_u8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1_m( @@ -805,7 +805,7 @@ void test_vsoxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { - return vsoxei32_v_u8m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2_m( @@ -814,7 +814,7 @@ void test_vsoxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vu // CHECK-RV64-NEXT: ret void // void 
test_vsoxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { - return vsoxei32_v_u8m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u8m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4_m( @@ -823,7 +823,7 @@ void test_vsoxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { - return vsoxei32_v_u16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2_m( @@ -832,7 +832,7 @@ void test_vsoxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { - return vsoxei32_v_u16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1_m( @@ -841,7 +841,7 @@ void test_vsoxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { - return vsoxei32_v_u16m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2_m( @@ -850,7 +850,7 @@ void test_vsoxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { - return vsoxei32_v_u16m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4_m( @@ -859,7 +859,7 
@@ void test_vsoxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { - return vsoxei32_v_u16m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2_m( @@ -868,7 +868,7 @@ void test_vsoxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { - return vsoxei32_v_u32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1_m( @@ -877,7 +877,7 @@ void test_vsoxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { - return vsoxei32_v_u32m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2_m( @@ -886,7 +886,7 @@ void test_vsoxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { - return vsoxei32_v_u32m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4_m( @@ -895,7 +895,7 @@ void test_vsoxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { - return vsoxei32_v_u32m4_m(mask, base, bindex, value, vl); + return 
__riscv_vsoxei32_v_u32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8_m( @@ -904,7 +904,7 @@ void test_vsoxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { - return vsoxei32_v_u32m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1_m( @@ -913,7 +913,7 @@ void test_vsoxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { - return vsoxei32_v_u64m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2_m( @@ -922,7 +922,7 @@ void test_vsoxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { - return vsoxei32_v_u64m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4_m( @@ -931,7 +931,7 @@ void test_vsoxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { - return vsoxei32_v_u64m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8_m( @@ -940,6 +940,6 @@ void test_vsoxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, 
vuint32m4_t bindex, vuint64m8_t value, size_t vl) { - return vsoxei32_v_u64m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei32_v_u64m8_m(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei64.c index d02e71218fca..d0b9e2294c42 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei64_v_f16mf4(base, bindex, value, vl); + return __riscv_vsoxei64_v_f16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t va // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei64_v_f16mf2(base, bindex, value, vl); + return __riscv_vsoxei64_v_f16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t va // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei64_v_f16m1(base, bindex, value, vl); + return __riscv_vsoxei64_v_f16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei64_v_f16m2(base, bindex, value, vl); + return 
__riscv_vsoxei64_v_f16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsoxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { - return vsoxei64_v_f32mf2(base, bindex, value, vl); + return __riscv_vsoxei64_v_f32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1( @@ -58,7 +58,7 @@ void test_vsoxei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { - return vsoxei64_v_f32m1(base, bindex, value, vl); + return __riscv_vsoxei64_v_f32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2( @@ -67,7 +67,7 @@ void test_vsoxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { - return vsoxei64_v_f32m2(base, bindex, value, vl); + return __riscv_vsoxei64_v_f32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4( @@ -76,7 +76,7 @@ void test_vsoxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { - return vsoxei64_v_f32m4(base, bindex, value, vl); + return __riscv_vsoxei64_v_f32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1( @@ -85,7 +85,7 @@ void test_vsoxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { - return vsoxei64_v_f64m1(base, bindex, value, vl); + return __riscv_vsoxei64_v_f64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: 
@test_vsoxei64_v_f64m2( @@ -94,7 +94,7 @@ void test_vsoxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { - return vsoxei64_v_f64m2(base, bindex, value, vl); + return __riscv_vsoxei64_v_f64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4( @@ -103,7 +103,7 @@ void test_vsoxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { - return vsoxei64_v_f64m4(base, bindex, value, vl); + return __riscv_vsoxei64_v_f64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8( @@ -112,7 +112,7 @@ void test_vsoxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { - return vsoxei64_v_f64m8(base, bindex, value, vl); + return __riscv_vsoxei64_v_f64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8( @@ -121,7 +121,7 @@ void test_vsoxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { - return vsoxei64_v_i8mf8(base, bindex, value, vl); + return __riscv_vsoxei64_v_i8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4( @@ -130,7 +130,7 @@ void test_vsoxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { - return vsoxei64_v_i8mf4(base, bindex, value, vl); + return __riscv_vsoxei64_v_i8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2( @@ -139,7 +139,7 @@ void 
test_vsoxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { - return vsoxei64_v_i8mf2(base, bindex, value, vl); + return __riscv_vsoxei64_v_i8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1( @@ -148,7 +148,7 @@ void test_vsoxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { - return vsoxei64_v_i8m1(base, bindex, value, vl); + return __riscv_vsoxei64_v_i8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4( @@ -157,7 +157,7 @@ void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { - return vsoxei64_v_i16mf4(base, bindex, value, vl); + return __riscv_vsoxei64_v_i16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2( @@ -166,7 +166,7 @@ void test_vsoxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { - return vsoxei64_v_i16mf2(base, bindex, value, vl); + return __riscv_vsoxei64_v_i16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1( @@ -175,7 +175,7 @@ void test_vsoxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { - return vsoxei64_v_i16m1(base, bindex, value, vl); + return __riscv_vsoxei64_v_i16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2( @@ -184,7 +184,7 @@ void test_vsoxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t 
value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { - return vsoxei64_v_i16m2(base, bindex, value, vl); + return __riscv_vsoxei64_v_i16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2( @@ -193,7 +193,7 @@ void test_vsoxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { - return vsoxei64_v_i32mf2(base, bindex, value, vl); + return __riscv_vsoxei64_v_i32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1( @@ -202,7 +202,7 @@ void test_vsoxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { - return vsoxei64_v_i32m1(base, bindex, value, vl); + return __riscv_vsoxei64_v_i32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2( @@ -211,7 +211,7 @@ void test_vsoxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { - return vsoxei64_v_i32m2(base, bindex, value, vl); + return __riscv_vsoxei64_v_i32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4( @@ -220,7 +220,7 @@ void test_vsoxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { - return vsoxei64_v_i32m4(base, bindex, value, vl); + return __riscv_vsoxei64_v_i32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1( @@ -229,7 +229,7 @@ void test_vsoxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i64m1(int64_t 
*base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { - return vsoxei64_v_i64m1(base, bindex, value, vl); + return __riscv_vsoxei64_v_i64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2( @@ -238,7 +238,7 @@ void test_vsoxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { - return vsoxei64_v_i64m2(base, bindex, value, vl); + return __riscv_vsoxei64_v_i64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4( @@ -247,7 +247,7 @@ void test_vsoxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { - return vsoxei64_v_i64m4(base, bindex, value, vl); + return __riscv_vsoxei64_v_i64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8( @@ -256,7 +256,7 @@ void test_vsoxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { - return vsoxei64_v_i64m8(base, bindex, value, vl); + return __riscv_vsoxei64_v_i64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8( @@ -265,7 +265,7 @@ void test_vsoxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { - return vsoxei64_v_u8mf8(base, bindex, value, vl); + return __riscv_vsoxei64_v_u8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4( @@ -274,7 +274,7 @@ void test_vsoxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { - return 
vsoxei64_v_u8mf4(base, bindex, value, vl); + return __riscv_vsoxei64_v_u8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2( @@ -283,7 +283,7 @@ void test_vsoxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { - return vsoxei64_v_u8mf2(base, bindex, value, vl); + return __riscv_vsoxei64_v_u8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1( @@ -292,7 +292,7 @@ void test_vsoxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { - return vsoxei64_v_u8m1(base, bindex, value, vl); + return __riscv_vsoxei64_v_u8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4( @@ -301,7 +301,7 @@ void test_vsoxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { - return vsoxei64_v_u16mf4(base, bindex, value, vl); + return __riscv_vsoxei64_v_u16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2( @@ -310,7 +310,7 @@ void test_vsoxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t val // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { - return vsoxei64_v_u16mf2(base, bindex, value, vl); + return __riscv_vsoxei64_v_u16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1( @@ -319,7 +319,7 @@ void test_vsoxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t val // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { - return vsoxei64_v_u16m1(base, bindex, value, vl); + return 
__riscv_vsoxei64_v_u16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2( @@ -328,7 +328,7 @@ void test_vsoxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { - return vsoxei64_v_u16m2(base, bindex, value, vl); + return __riscv_vsoxei64_v_u16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2( @@ -337,7 +337,7 @@ void test_vsoxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { - return vsoxei64_v_u32mf2(base, bindex, value, vl); + return __riscv_vsoxei64_v_u32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1( @@ -346,7 +346,7 @@ void test_vsoxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t val // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { - return vsoxei64_v_u32m1(base, bindex, value, vl); + return __riscv_vsoxei64_v_u32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2( @@ -355,7 +355,7 @@ void test_vsoxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { - return vsoxei64_v_u32m2(base, bindex, value, vl); + return __riscv_vsoxei64_v_u32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4( @@ -364,7 +364,7 @@ void test_vsoxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { - return vsoxei64_v_u32m4(base, bindex, value, vl); + return __riscv_vsoxei64_v_u32m4(base, bindex, value, vl); } // 
CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1( @@ -373,7 +373,7 @@ void test_vsoxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { - return vsoxei64_v_u64m1(base, bindex, value, vl); + return __riscv_vsoxei64_v_u64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2( @@ -382,7 +382,7 @@ void test_vsoxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { - return vsoxei64_v_u64m2(base, bindex, value, vl); + return __riscv_vsoxei64_v_u64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4( @@ -391,7 +391,7 @@ void test_vsoxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { - return vsoxei64_v_u64m4(base, bindex, value, vl); + return __riscv_vsoxei64_v_u64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8( @@ -400,7 +400,7 @@ void test_vsoxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { - return vsoxei64_v_u64m8(base, bindex, value, vl); + return __riscv_vsoxei64_v_u64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4_m( @@ -409,7 +409,7 @@ void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei64_v_f16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_f16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: 
@test_vsoxei64_v_f16mf2_m( @@ -418,7 +418,7 @@ void test_vsoxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei64_v_f16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_f16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1_m( @@ -427,7 +427,7 @@ void test_vsoxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei64_v_f16m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_f16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2_m( @@ -436,7 +436,7 @@ void test_vsoxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei64_v_f16m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_f16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2_m( @@ -445,7 +445,7 @@ void test_vsoxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { - return vsoxei64_v_f32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_f32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1_m( @@ -454,7 +454,7 @@ void test_vsoxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { - return 
vsoxei64_v_f32m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_f32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2_m( @@ -463,7 +463,7 @@ void test_vsoxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { - return vsoxei64_v_f32m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_f32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4_m( @@ -472,7 +472,7 @@ void test_vsoxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { - return vsoxei64_v_f32m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_f32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1_m( @@ -481,7 +481,7 @@ void test_vsoxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { - return vsoxei64_v_f64m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_f64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2_m( @@ -490,7 +490,7 @@ void test_vsoxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { - return vsoxei64_v_f64m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_f64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4_m( @@ -499,7 +499,7 @@ void test_vsoxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, v // CHECK-RV64-NEXT: ret void // void 
test_vsoxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { - return vsoxei64_v_f64m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_f64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8_m( @@ -508,7 +508,7 @@ void test_vsoxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { - return vsoxei64_v_f64m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_f64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8_m( @@ -517,7 +517,7 @@ void test_vsoxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { - return vsoxei64_v_i8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4_m( @@ -526,7 +526,7 @@ void test_vsoxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { - return vsoxei64_v_i8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2_m( @@ -535,7 +535,7 @@ void test_vsoxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { - return vsoxei64_v_i8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1_m( @@ -544,7 +544,7 @@ void 
test_vsoxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { - return vsoxei64_v_i8m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4_m( @@ -553,7 +553,7 @@ void test_vsoxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { - return vsoxei64_v_i16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2_m( @@ -562,7 +562,7 @@ void test_vsoxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { - return vsoxei64_v_i16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1_m( @@ -571,7 +571,7 @@ void test_vsoxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { - return vsoxei64_v_i16m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2_m( @@ -580,7 +580,7 @@ void test_vsoxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { - return vsoxei64_v_i16m2_m(mask, base, bindex, value, vl); + return 
__riscv_vsoxei64_v_i16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2_m( @@ -589,7 +589,7 @@ void test_vsoxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { - return vsoxei64_v_i32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1_m( @@ -598,7 +598,7 @@ void test_vsoxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { - return vsoxei64_v_i32m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2_m( @@ -607,7 +607,7 @@ void test_vsoxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { - return vsoxei64_v_i32m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4_m( @@ -616,7 +616,7 @@ void test_vsoxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { - return vsoxei64_v_i32m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1_m( @@ -625,7 +625,7 @@ void test_vsoxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t 
bindex, vint64m1_t value, size_t vl) { - return vsoxei64_v_i64m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2_m( @@ -634,7 +634,7 @@ void test_vsoxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { - return vsoxei64_v_i64m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4_m( @@ -643,7 +643,7 @@ void test_vsoxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { - return vsoxei64_v_i64m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8_m( @@ -652,7 +652,7 @@ void test_vsoxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { - return vsoxei64_v_i64m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_i64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8_m( @@ -661,7 +661,7 @@ void test_vsoxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { - return vsoxei64_v_u8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4_m( @@ -670,7 +670,7 @@ void test_vsoxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, // 
CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { - return vsoxei64_v_u8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2_m( @@ -679,7 +679,7 @@ void test_vsoxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { - return vsoxei64_v_u8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1_m( @@ -688,7 +688,7 @@ void test_vsoxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { - return vsoxei64_v_u8m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4_m( @@ -697,7 +697,7 @@ void test_vsoxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { - return vsoxei64_v_u16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2_m( @@ -706,7 +706,7 @@ void test_vsoxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { - return vsoxei64_v_u16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: 
@test_vsoxei64_v_u16m1_m( @@ -715,7 +715,7 @@ void test_vsoxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { - return vsoxei64_v_u16m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2_m( @@ -724,7 +724,7 @@ void test_vsoxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { - return vsoxei64_v_u16m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2_m( @@ -733,7 +733,7 @@ void test_vsoxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { - return vsoxei64_v_u32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1_m( @@ -742,7 +742,7 @@ void test_vsoxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { - return vsoxei64_v_u32m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2_m( @@ -751,7 +751,7 @@ void test_vsoxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { - return 
vsoxei64_v_u32m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4_m( @@ -760,7 +760,7 @@ void test_vsoxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { - return vsoxei64_v_u32m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1_m( @@ -769,7 +769,7 @@ void test_vsoxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { - return vsoxei64_v_u64m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2_m( @@ -778,7 +778,7 @@ void test_vsoxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { - return vsoxei64_v_u64m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4_m( @@ -787,7 +787,7 @@ void test_vsoxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { - return vsoxei64_v_u64m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8_m( @@ -796,6 +796,6 @@ void test_vsoxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void 
test_vsoxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { - return vsoxei64_v_u64m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei64_v_u64m8_m(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei8.c index 7ad530831a5f..2a7423bc3692 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei8_v_f16mf4(base, bindex, value, vl); + return __riscv_vsoxei8_v_f16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t val // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei8_v_f16mf2(base, bindex, value, vl); + return __riscv_vsoxei8_v_f16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t val // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei8_v_f16m1(base, bindex, value, vl); + return __riscv_vsoxei8_v_f16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei8_v_f16m2(base, 
bindex, value, vl); + return __riscv_vsoxei8_v_f16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4( @@ -49,7 +49,7 @@ void test_vsoxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { - return vsoxei8_v_f16m4(base, bindex, value, vl); + return __riscv_vsoxei8_v_f16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8( @@ -58,7 +58,7 @@ void test_vsoxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { - return vsoxei8_v_f16m8(base, bindex, value, vl); + return __riscv_vsoxei8_v_f16m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2( @@ -67,7 +67,7 @@ void test_vsoxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { - return vsoxei8_v_f32mf2(base, bindex, value, vl); + return __riscv_vsoxei8_v_f32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1( @@ -76,7 +76,7 @@ void test_vsoxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { - return vsoxei8_v_f32m1(base, bindex, value, vl); + return __riscv_vsoxei8_v_f32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2( @@ -85,7 +85,7 @@ void test_vsoxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { - return vsoxei8_v_f32m2(base, bindex, value, vl); + return __riscv_vsoxei8_v_f32m2(base, bindex, value, vl); } // 
CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4( @@ -94,7 +94,7 @@ void test_vsoxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { - return vsoxei8_v_f32m4(base, bindex, value, vl); + return __riscv_vsoxei8_v_f32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8( @@ -103,7 +103,7 @@ void test_vsoxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, si // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { - return vsoxei8_v_f32m8(base, bindex, value, vl); + return __riscv_vsoxei8_v_f32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1( @@ -112,7 +112,7 @@ void test_vsoxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, si // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { - return vsoxei8_v_f64m1(base, bindex, value, vl); + return __riscv_vsoxei8_v_f64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2( @@ -121,7 +121,7 @@ void test_vsoxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { - return vsoxei8_v_f64m2(base, bindex, value, vl); + return __riscv_vsoxei8_v_f64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4( @@ -130,7 +130,7 @@ void test_vsoxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { - return vsoxei8_v_f64m4(base, bindex, value, vl); + return __riscv_vsoxei8_v_f64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8( @@ -139,7 +139,7 @@ void test_vsoxei8_v_f64m4(double 
*base, vuint8mf2_t bindex, vfloat64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { - return vsoxei8_v_f64m8(base, bindex, value, vl); + return __riscv_vsoxei8_v_f64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8( @@ -148,7 +148,7 @@ void test_vsoxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { - return vsoxei8_v_i8mf8(base, bindex, value, vl); + return __riscv_vsoxei8_v_i8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4( @@ -157,7 +157,7 @@ void test_vsoxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, si // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { - return vsoxei8_v_i8mf4(base, bindex, value, vl); + return __riscv_vsoxei8_v_i8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2( @@ -166,7 +166,7 @@ void test_vsoxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, si // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { - return vsoxei8_v_i8mf2(base, bindex, value, vl); + return __riscv_vsoxei8_v_i8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1( @@ -175,7 +175,7 @@ void test_vsoxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, si // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { - return vsoxei8_v_i8m1(base, bindex, value, vl); + return __riscv_vsoxei8_v_i8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2( @@ -184,7 +184,7 @@ void test_vsoxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_ // CHECK-RV64-NEXT: ret void // void 
test_vsoxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { - return vsoxei8_v_i8m2(base, bindex, value, vl); + return __riscv_vsoxei8_v_i8m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4( @@ -193,7 +193,7 @@ void test_vsoxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { - return vsoxei8_v_i8m4(base, bindex, value, vl); + return __riscv_vsoxei8_v_i8m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8( @@ -202,7 +202,7 @@ void test_vsoxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { - return vsoxei8_v_i8m8(base, bindex, value, vl); + return __riscv_vsoxei8_v_i8m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4( @@ -211,7 +211,7 @@ void test_vsoxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { - return vsoxei8_v_i16mf4(base, bindex, value, vl); + return __riscv_vsoxei8_v_i16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2( @@ -220,7 +220,7 @@ void test_vsoxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { - return vsoxei8_v_i16mf2(base, bindex, value, vl); + return __riscv_vsoxei8_v_i16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1( @@ -229,7 +229,7 @@ void test_vsoxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { - return 
vsoxei8_v_i16m1(base, bindex, value, vl); + return __riscv_vsoxei8_v_i16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2( @@ -238,7 +238,7 @@ void test_vsoxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { - return vsoxei8_v_i16m2(base, bindex, value, vl); + return __riscv_vsoxei8_v_i16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4( @@ -247,7 +247,7 @@ void test_vsoxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, si // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { - return vsoxei8_v_i16m4(base, bindex, value, vl); + return __riscv_vsoxei8_v_i16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8( @@ -256,7 +256,7 @@ void test_vsoxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, si // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { - return vsoxei8_v_i16m8(base, bindex, value, vl); + return __riscv_vsoxei8_v_i16m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2( @@ -265,7 +265,7 @@ void test_vsoxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, si // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { - return vsoxei8_v_i32mf2(base, bindex, value, vl); + return __riscv_vsoxei8_v_i32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1( @@ -274,7 +274,7 @@ void test_vsoxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { - return vsoxei8_v_i32m1(base, bindex, value, vl); + return __riscv_vsoxei8_v_i32m1(base, bindex, 
value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2( @@ -283,7 +283,7 @@ void test_vsoxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { - return vsoxei8_v_i32m2(base, bindex, value, vl); + return __riscv_vsoxei8_v_i32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4( @@ -292,7 +292,7 @@ void test_vsoxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { - return vsoxei8_v_i32m4(base, bindex, value, vl); + return __riscv_vsoxei8_v_i32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8( @@ -301,7 +301,7 @@ void test_vsoxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, si // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { - return vsoxei8_v_i32m8(base, bindex, value, vl); + return __riscv_vsoxei8_v_i32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1( @@ -310,7 +310,7 @@ void test_vsoxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, si // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { - return vsoxei8_v_i64m1(base, bindex, value, vl); + return __riscv_vsoxei8_v_i64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2( @@ -319,7 +319,7 @@ void test_vsoxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { - return vsoxei8_v_i64m2(base, bindex, value, vl); + return __riscv_vsoxei8_v_i64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4( @@ -328,7 +328,7 @@ void 
test_vsoxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { - return vsoxei8_v_i64m4(base, bindex, value, vl); + return __riscv_vsoxei8_v_i64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8( @@ -337,7 +337,7 @@ void test_vsoxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, s // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { - return vsoxei8_v_i64m8(base, bindex, value, vl); + return __riscv_vsoxei8_v_i64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8( @@ -346,7 +346,7 @@ void test_vsoxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, si // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { - return vsoxei8_v_u8mf8(base, bindex, value, vl); + return __riscv_vsoxei8_v_u8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4( @@ -355,7 +355,7 @@ void test_vsoxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { - return vsoxei8_v_u8mf4(base, bindex, value, vl); + return __riscv_vsoxei8_v_u8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2( @@ -364,7 +364,7 @@ void test_vsoxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { - return vsoxei8_v_u8mf2(base, bindex, value, vl); + return __riscv_vsoxei8_v_u8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1( @@ -373,7 +373,7 @@ void test_vsoxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, // CHECK-RV64-NEXT: 
ret void // void test_vsoxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { - return vsoxei8_v_u8m1(base, bindex, value, vl); + return __riscv_vsoxei8_v_u8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2( @@ -382,7 +382,7 @@ void test_vsoxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { - return vsoxei8_v_u8m2(base, bindex, value, vl); + return __riscv_vsoxei8_v_u8m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4( @@ -391,7 +391,7 @@ void test_vsoxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { - return vsoxei8_v_u8m4(base, bindex, value, vl); + return __riscv_vsoxei8_v_u8m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8( @@ -400,7 +400,7 @@ void test_vsoxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { - return vsoxei8_v_u8m8(base, bindex, value, vl); + return __riscv_vsoxei8_v_u8m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4( @@ -409,7 +409,7 @@ void test_vsoxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { - return vsoxei8_v_u16mf4(base, bindex, value, vl); + return __riscv_vsoxei8_v_u16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2( @@ -418,7 +418,7 @@ void test_vsoxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) 
{ - return vsoxei8_v_u16mf2(base, bindex, value, vl); + return __riscv_vsoxei8_v_u16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1( @@ -427,7 +427,7 @@ void test_vsoxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { - return vsoxei8_v_u16m1(base, bindex, value, vl); + return __riscv_vsoxei8_v_u16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2( @@ -436,7 +436,7 @@ void test_vsoxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { - return vsoxei8_v_u16m2(base, bindex, value, vl); + return __riscv_vsoxei8_v_u16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4( @@ -445,7 +445,7 @@ void test_vsoxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { - return vsoxei8_v_u16m4(base, bindex, value, vl); + return __riscv_vsoxei8_v_u16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8( @@ -454,7 +454,7 @@ void test_vsoxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { - return vsoxei8_v_u16m8(base, bindex, value, vl); + return __riscv_vsoxei8_v_u16m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2( @@ -463,7 +463,7 @@ void test_vsoxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { - return vsoxei8_v_u32mf2(base, bindex, value, vl); + return 
__riscv_vsoxei8_v_u32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1( @@ -472,7 +472,7 @@ void test_vsoxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { - return vsoxei8_v_u32m1(base, bindex, value, vl); + return __riscv_vsoxei8_v_u32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2( @@ -481,7 +481,7 @@ void test_vsoxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { - return vsoxei8_v_u32m2(base, bindex, value, vl); + return __riscv_vsoxei8_v_u32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4( @@ -490,7 +490,7 @@ void test_vsoxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { - return vsoxei8_v_u32m4(base, bindex, value, vl); + return __riscv_vsoxei8_v_u32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8( @@ -499,7 +499,7 @@ void test_vsoxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { - return vsoxei8_v_u32m8(base, bindex, value, vl); + return __riscv_vsoxei8_v_u32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1( @@ -508,7 +508,7 @@ void test_vsoxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { - return vsoxei8_v_u64m1(base, bindex, value, vl); + return __riscv_vsoxei8_v_u64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: 
@test_vsoxei8_v_u64m2( @@ -517,7 +517,7 @@ void test_vsoxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { - return vsoxei8_v_u64m2(base, bindex, value, vl); + return __riscv_vsoxei8_v_u64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4( @@ -526,7 +526,7 @@ void test_vsoxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { - return vsoxei8_v_u64m4(base, bindex, value, vl); + return __riscv_vsoxei8_v_u64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8( @@ -535,7 +535,7 @@ void test_vsoxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { - return vsoxei8_v_u64m8(base, bindex, value, vl); + return __riscv_vsoxei8_v_u64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4_m( @@ -544,7 +544,7 @@ void test_vsoxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei8_v_f16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2_m( @@ -553,7 +553,7 @@ void test_vsoxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei8_v_f16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: 
@test_vsoxei8_v_f16m1_m( @@ -562,7 +562,7 @@ void test_vsoxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei8_v_f16m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2_m( @@ -571,7 +571,7 @@ void test_vsoxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei8_v_f16m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4_m( @@ -580,7 +580,7 @@ void test_vsoxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { - return vsoxei8_v_f16m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8_m( @@ -589,7 +589,7 @@ void test_vsoxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { - return vsoxei8_v_f16m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f16m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2_m( @@ -598,7 +598,7 @@ void test_vsoxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { - return vsoxei8_v_f32mf2_m(mask, base, bindex, value, 
vl); + return __riscv_vsoxei8_v_f32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1_m( @@ -607,7 +607,7 @@ void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { - return vsoxei8_v_f32m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2_m( @@ -616,7 +616,7 @@ void test_vsoxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { - return vsoxei8_v_f32m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4_m( @@ -625,7 +625,7 @@ void test_vsoxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { - return vsoxei8_v_f32m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8_m( @@ -634,7 +634,7 @@ void test_vsoxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloa // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { - return vsoxei8_v_f32m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1_m( @@ -643,7 +643,7 @@ void test_vsoxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, vfloa // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, 
vfloat64m1_t value, size_t vl) { - return vsoxei8_v_f64m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2_m( @@ -652,7 +652,7 @@ void test_vsoxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { - return vsoxei8_v_f64m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4_m( @@ -661,7 +661,7 @@ void test_vsoxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { - return vsoxei8_v_f64m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8_m( @@ -670,7 +670,7 @@ void test_vsoxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { - return vsoxei8_v_f64m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_f64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8_m( @@ -679,7 +679,7 @@ void test_vsoxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, vflo // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { - return vsoxei8_v_i8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4_m( @@ -688,7 +688,7 @@ void test_vsoxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vi // CHECK-RV64-NEXT: ret 
void // void test_vsoxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { - return vsoxei8_v_i8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2_m( @@ -697,7 +697,7 @@ void test_vsoxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vi // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { - return vsoxei8_v_i8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1_m( @@ -706,7 +706,7 @@ void test_vsoxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vi // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { - return vsoxei8_v_i8m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2_m( @@ -715,7 +715,7 @@ void test_vsoxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8 // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { - return vsoxei8_v_i8m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i8m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4_m( @@ -724,7 +724,7 @@ void test_vsoxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8 // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { - return vsoxei8_v_i8m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i8m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8_m( @@ -733,7 +733,7 @@ void test_vsoxei8_v_i8m4_m(vbool2_t mask, 
int8_t *base, vuint8m4_t bindex, vint8 // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { - return vsoxei8_v_i8m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i8m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4_m( @@ -742,7 +742,7 @@ void test_vsoxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8 // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { - return vsoxei8_v_i16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2_m( @@ -751,7 +751,7 @@ void test_vsoxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { - return vsoxei8_v_i16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1_m( @@ -760,7 +760,7 @@ void test_vsoxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { - return vsoxei8_v_i16m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2_m( @@ -769,7 +769,7 @@ void test_vsoxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { - return vsoxei8_v_i16m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: 
@test_vsoxei8_v_i16m4_m( @@ -778,7 +778,7 @@ void test_vsoxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { - return vsoxei8_v_i16m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8_m( @@ -787,7 +787,7 @@ void test_vsoxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { - return vsoxei8_v_i16m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i16m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2_m( @@ -796,7 +796,7 @@ void test_vsoxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { - return vsoxei8_v_i32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1_m( @@ -805,7 +805,7 @@ void test_vsoxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { - return vsoxei8_v_i32m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2_m( @@ -814,7 +814,7 @@ void test_vsoxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { - return vsoxei8_v_i32m2_m(mask, base, bindex, value, vl); + 
return __riscv_vsoxei8_v_i32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4_m( @@ -823,7 +823,7 @@ void test_vsoxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { - return vsoxei8_v_i32m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8_m( @@ -832,7 +832,7 @@ void test_vsoxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { - return vsoxei8_v_i32m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1_m( @@ -841,7 +841,7 @@ void test_vsoxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { - return vsoxei8_v_i64m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2_m( @@ -850,7 +850,7 @@ void test_vsoxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { - return vsoxei8_v_i64m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4_m( @@ -859,7 +859,7 @@ void test_vsoxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t 
value, size_t vl) { - return vsoxei8_v_i64m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8_m( @@ -868,7 +868,7 @@ void test_vsoxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { - return vsoxei8_v_i64m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_i64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8_m( @@ -877,7 +877,7 @@ void test_vsoxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { - return vsoxei8_v_u8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4_m( @@ -886,7 +886,7 @@ void test_vsoxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { - return vsoxei8_v_u8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2_m( @@ -895,7 +895,7 @@ void test_vsoxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { - return vsoxei8_v_u8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1_m( @@ -904,7 +904,7 @@ void test_vsoxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, v // CHECK-RV64-NEXT: ret void // void 
test_vsoxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { - return vsoxei8_v_u8m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2_m( @@ -913,7 +913,7 @@ void test_vsoxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuin // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { - return vsoxei8_v_u8m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u8m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4_m( @@ -922,7 +922,7 @@ void test_vsoxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuin // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { - return vsoxei8_v_u8m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u8m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8_m( @@ -931,7 +931,7 @@ void test_vsoxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuin // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { - return vsoxei8_v_u8m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u8m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4_m( @@ -940,7 +940,7 @@ void test_vsoxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuin // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { - return vsoxei8_v_u16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2_m( @@ -949,7 +949,7 @@ void test_vsoxei8_v_u16mf4_m(vbool64_t mask, 
uint16_t *base, vuint8mf8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { - return vsoxei8_v_u16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1_m( @@ -958,7 +958,7 @@ void test_vsoxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { - return vsoxei8_v_u16m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2_m( @@ -967,7 +967,7 @@ void test_vsoxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { - return vsoxei8_v_u16m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4_m( @@ -976,7 +976,7 @@ void test_vsoxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { - return vsoxei8_v_u16m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8_m( @@ -985,7 +985,7 @@ void test_vsoxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { - return vsoxei8_v_u16m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u16m8_m(mask, base, bindex, value, vl); } // 
CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2_m( @@ -994,7 +994,7 @@ void test_vsoxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { - return vsoxei8_v_u32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1_m( @@ -1003,7 +1003,7 @@ void test_vsoxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { - return vsoxei8_v_u32m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2_m( @@ -1012,7 +1012,7 @@ void test_vsoxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { - return vsoxei8_v_u32m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4_m( @@ -1021,7 +1021,7 @@ void test_vsoxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { - return vsoxei8_v_u32m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8_m( @@ -1030,7 +1030,7 @@ void test_vsoxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { - return 
vsoxei8_v_u32m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1_m( @@ -1039,7 +1039,7 @@ void test_vsoxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { - return vsoxei8_v_u64m1_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2_m( @@ -1048,7 +1048,7 @@ void test_vsoxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { - return vsoxei8_v_u64m2_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4_m( @@ -1057,7 +1057,7 @@ void test_vsoxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { - return vsoxei8_v_u64m4_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8_m( @@ -1066,6 +1066,6 @@ void test_vsoxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { - return vsoxei8_v_u64m8_m(mask, base, bindex, value, vl); + return __riscv_vsoxei8_v_u64m8_m(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c index cabd4777f793..4e74f8760b9a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg2ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei16_v_f16m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei16_v_f16m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4( @@ -49,7 +49,7 @@ void test_vsoxseg2ei16_v_f16m2(_Float16 
*base, vuint16m2_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsoxseg2ei16_v_f16m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2( @@ -58,7 +58,7 @@ void test_vsoxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_f32mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1( @@ -67,7 +67,7 @@ void test_vsoxseg2ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei16_v_f32m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2( @@ -76,7 +76,7 @@ void test_vsoxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei16_v_f32m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4( @@ -85,7 +85,7 @@ void test_vsoxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei16_v_f32m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg2ei16_v_f64m1( @@ -94,7 +94,7 @@ void test_vsoxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_f64m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2( @@ -103,7 +103,7 @@ void test_vsoxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei16_v_f64m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4( @@ -112,7 +112,7 @@ void test_vsoxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei16_v_f64m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8( @@ -121,7 +121,7 @@ void test_vsoxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei16_v_i8mf8(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4( @@ -130,7 +130,7 @@ void test_vsoxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_i8mf4(base, bindex, v0, v1, vl); + 
return __riscv_vsoxseg2ei16_v_i8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2( @@ -139,7 +139,7 @@ void test_vsoxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_i8mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1( @@ -148,7 +148,7 @@ void test_vsoxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i8m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2( @@ -157,7 +157,7 @@ void test_vsoxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i8m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i8m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4( @@ -166,7 +166,7 @@ void test_vsoxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i8m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i8m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4( @@ -175,7 +175,7 @@ void test_vsoxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - 
return vsoxseg2ei16_v_i16mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf2( @@ -184,7 +184,7 @@ void test_vsoxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_i16mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1( @@ -193,7 +193,7 @@ void test_vsoxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2( @@ -202,7 +202,7 @@ void test_vsoxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4( @@ -211,7 +211,7 @@ void test_vsoxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0, // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_i32mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1( @@ -229,7 +229,7 @@ void test_vsoxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i32m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2( @@ -238,7 +238,7 @@ void test_vsoxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i32m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4( @@ -247,7 +247,7 @@ void test_vsoxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i32m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1( @@ -256,7 +256,7 @@ void test_vsoxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2( @@ -265,7 +265,7 @@ void 
test_vsoxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m4( @@ -274,7 +274,7 @@ void test_vsoxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf8( @@ -283,7 +283,7 @@ void test_vsoxseg2ei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei16_v_u8mf8(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf4( @@ -292,7 +292,7 @@ void test_vsoxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_u8mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2( @@ -301,7 +301,7 @@ void test_vsoxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_u8mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u8mf2(base, bindex, v0, 
v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1( @@ -310,7 +310,7 @@ void test_vsoxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u8m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2( @@ -319,7 +319,7 @@ void test_vsoxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u8m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u8m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4( @@ -328,7 +328,7 @@ void test_vsoxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u8m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u8m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4( @@ -337,7 +337,7 @@ void test_vsoxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_u16mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2( @@ -346,7 +346,7 @@ void test_vsoxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return 
vsoxseg2ei16_v_u16mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1( @@ -355,7 +355,7 @@ void test_vsoxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u16m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2( @@ -364,7 +364,7 @@ void test_vsoxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u16m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4( @@ -373,7 +373,7 @@ void test_vsoxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u16m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2( @@ -382,7 +382,7 @@ void test_vsoxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_u32mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1( @@ -391,7 +391,7 @@ void test_vsoxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u32m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2( @@ -400,7 +400,7 @@ void test_vsoxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u32m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4( @@ -409,7 +409,7 @@ void test_vsoxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u32m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1( @@ -418,7 +418,7 @@ void test_vsoxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u64m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2( @@ -427,7 +427,7 @@ void test_vsoxseg2ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u64m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4( @@ -436,7 +436,7 @@ void 
test_vsoxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u64m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4_m( @@ -445,7 +445,7 @@ void test_vsoxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2_m( @@ -472,7 +472,7 @@ void test_vsoxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, 
vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4_m( @@ -481,7 +481,7 @@ void test_vsoxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsoxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2_m( @@ -490,7 +490,7 @@ void test_vsoxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1_m( @@ -499,7 +499,7 @@ void test_vsoxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2_m( @@ -508,7 +508,7 @@ void test_vsoxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f32m2_m(mask, base, bindex, v0, 
v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4_m( @@ -517,7 +517,7 @@ void test_vsoxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1_m( @@ -526,7 +526,7 @@ void test_vsoxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2_m( @@ -535,7 +535,7 @@ void test_vsoxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4_m( @@ -544,7 +544,7 @@ void test_vsoxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8_m( @@ -553,7 +553,7 @@ void test_vsoxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t binde // CHECK-RV64-NEXT: 
ret void // void test_vsoxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4_m( @@ -562,7 +562,7 @@ void test_vsoxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2_m( @@ -571,7 +571,7 @@ void test_vsoxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1_m( @@ -580,7 +580,7 @@ void test_vsoxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2_m( @@ -589,7 +589,7 @@ void test_vsoxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl); + 
return __riscv_vsoxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4_m( @@ -598,7 +598,7 @@ void test_vsoxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4_m( @@ -607,7 +607,7 @@ void test_vsoxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf2_m( @@ -616,7 +616,7 @@ void test_vsoxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1_m( @@ -625,7 +625,7 @@ void test_vsoxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2_m( @@ -634,7 +634,7 @@ void test_vsoxseg2ei16_v_i16m1_m(vbool16_t 
mask, int16_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4_m( @@ -643,7 +643,7 @@ void test_vsoxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2_m( @@ -652,7 +652,7 @@ void test_vsoxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1_m( @@ -661,7 +661,7 @@ void test_vsoxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2_m( @@ -670,7 +670,7 @@ void test_vsoxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, 
size_t vl) { - return vsoxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4_m( @@ -679,7 +679,7 @@ void test_vsoxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1_m( @@ -688,7 +688,7 @@ void test_vsoxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2_m( @@ -697,7 +697,7 @@ void test_vsoxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m4_m( @@ -706,7 +706,7 @@ void test_vsoxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg2ei16_v_u8mf8_m( @@ -715,7 +715,7 @@ void test_vsoxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf4_m( @@ -724,7 +724,7 @@ void test_vsoxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2_m( @@ -733,7 +733,7 @@ void test_vsoxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1_m( @@ -742,7 +742,7 @@ void test_vsoxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2_m( @@ -751,7 +751,7 @@ void test_vsoxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4_m( @@ -760,7 +760,7 @@ void test_vsoxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4_m( @@ -769,7 +769,7 @@ void test_vsoxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2_m( @@ -778,7 +778,7 @@ void test_vsoxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1_m( @@ -787,7 +787,7 @@ void test_vsoxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, 
vl); + return __riscv_vsoxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2_m( @@ -796,7 +796,7 @@ void test_vsoxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4_m( @@ -805,7 +805,7 @@ void test_vsoxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2_m( @@ -814,7 +814,7 @@ void test_vsoxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1_m( @@ -823,7 +823,7 @@ void test_vsoxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2_m( @@ -832,7 +832,7 @@ void 
test_vsoxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4_m( @@ -841,7 +841,7 @@ void test_vsoxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1_m( @@ -850,7 +850,7 @@ void test_vsoxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2_m( @@ -859,7 +859,7 @@ void test_vsoxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4_m( @@ -868,6 +868,6 @@ void test_vsoxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei16_v_u64m4_m(vbool16_t mask, uint64_t *base, 
vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c index 5011efaadb6b..a463a3d2d71e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4( @@ -49,7 +49,7 @@ void test_vsoxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2( @@ -58,7 +58,7 @@ void test_vsoxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_f32mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1( @@ -67,7 +67,7 @@ void test_vsoxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2( @@ -76,7 +76,7 @@ void test_vsoxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4( @@ -85,7 +85,7 @@ void 
test_vsoxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1( @@ -94,7 +94,7 @@ void test_vsoxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f64m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2( @@ -103,7 +103,7 @@ void test_vsoxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f64m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4( @@ -112,7 +112,7 @@ void test_vsoxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei32_v_f64m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8( @@ -121,7 +121,7 @@ void test_vsoxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei32_v_i8mf8(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i8mf8(base, bindex, v0, 
v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4( @@ -130,7 +130,7 @@ void test_vsoxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_i8mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2( @@ -139,7 +139,7 @@ void test_vsoxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_i8mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1( @@ -148,7 +148,7 @@ void test_vsoxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i8m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2( @@ -157,7 +157,7 @@ void test_vsoxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i8m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i8m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4( @@ -166,7 +166,7 @@ void test_vsoxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_i16mf4(base, bindex, v0, 
v1, vl); + return __riscv_vsoxseg2ei32_v_i16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf2( @@ -175,7 +175,7 @@ void test_vsoxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_i16mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1( @@ -184,7 +184,7 @@ void test_vsoxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i16m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2( @@ -193,7 +193,7 @@ void test_vsoxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i16m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4( @@ -202,7 +202,7 @@ void test_vsoxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei32_v_i16m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2( @@ -211,7 +211,7 @@ void test_vsoxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, 
vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_i32mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1( @@ -220,7 +220,7 @@ void test_vsoxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2( @@ -229,7 +229,7 @@ void test_vsoxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4( @@ -238,7 +238,7 @@ void test_vsoxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1( @@ -247,7 +247,7 @@ void test_vsoxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m2( @@ -256,7 +256,7 @@ void test_vsoxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4( @@ -265,7 +265,7 @@ void test_vsoxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8( @@ -274,7 +274,7 @@ void test_vsoxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei32_v_u8mf8(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4( @@ -283,7 +283,7 @@ void test_vsoxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_u8mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2( @@ -292,7 +292,7 @@ void test_vsoxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_u8mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1( @@ -301,7 +301,7 @@ 
void test_vsoxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u8m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2( @@ -310,7 +310,7 @@ void test_vsoxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u8m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u8m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4( @@ -319,7 +319,7 @@ void test_vsoxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_u16mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2( @@ -328,7 +328,7 @@ void test_vsoxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_u16mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1( @@ -337,7 +337,7 @@ void test_vsoxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u16m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u16m1(base, 
bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2( @@ -346,7 +346,7 @@ void test_vsoxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u16m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4( @@ -355,7 +355,7 @@ void test_vsoxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei32_v_u16m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2( @@ -364,7 +364,7 @@ void test_vsoxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_u32mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m1( @@ -373,7 +373,7 @@ void test_vsoxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u32m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2( @@ -382,7 +382,7 @@ void test_vsoxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - 
return vsoxseg2ei32_v_u32m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4( @@ -391,7 +391,7 @@ void test_vsoxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei32_v_u32m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1( @@ -400,7 +400,7 @@ void test_vsoxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u64m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2( @@ -409,7 +409,7 @@ void test_vsoxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u64m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4( @@ -418,7 +418,7 @@ void test_vsoxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei32_v_u64m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2_m( @@ -454,7 +454,7 @@ void test_vsoxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4_m( @@ -463,7 +463,7 @@ void test_vsoxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m4_m(mask, 
base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2_m( @@ -472,7 +472,7 @@ void test_vsoxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1_m( @@ -481,7 +481,7 @@ void test_vsoxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2_m( @@ -490,7 +490,7 @@ void test_vsoxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4_m( @@ -499,7 +499,7 @@ void test_vsoxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1_m( @@ -508,7 +508,7 @@ void 
test_vsoxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2_m( @@ -517,7 +517,7 @@ void test_vsoxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4_m( @@ -526,7 +526,7 @@ void test_vsoxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8_m( @@ -535,7 +535,7 @@ void test_vsoxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4_m( @@ -544,7 +544,7 @@ void test_vsoxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t 
bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2_m( @@ -553,7 +553,7 @@ void test_vsoxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1_m( @@ -562,7 +562,7 @@ void test_vsoxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2_m( @@ -571,7 +571,7 @@ void test_vsoxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4_m( @@ -580,7 +580,7 @@ void test_vsoxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf2_m( @@ -589,7 +589,7 @@ void test_vsoxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1_m( @@ -598,7 +598,7 @@ void test_vsoxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2_m( @@ -607,7 +607,7 @@ void test_vsoxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4_m( @@ -616,7 +616,7 @@ void test_vsoxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2_m( @@ -625,7 +625,7 @@ void test_vsoxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t binde // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1_m( @@ -634,7 +634,7 @@ void test_vsoxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2_m( @@ -643,7 +643,7 @@ void test_vsoxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4_m( @@ -652,7 +652,7 @@ void test_vsoxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1_m( @@ -661,7 +661,7 @@ void test_vsoxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl); + 
return __riscv_vsoxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m2_m( @@ -670,7 +670,7 @@ void test_vsoxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4_m( @@ -679,7 +679,7 @@ void test_vsoxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8_m( @@ -688,7 +688,7 @@ void test_vsoxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4_m( @@ -697,7 +697,7 @@ void test_vsoxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2_m( @@ -706,7 +706,7 @@ void test_vsoxseg2ei32_v_u8mf4_m(vbool32_t 
mask, uint8_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1_m( @@ -715,7 +715,7 @@ void test_vsoxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2_m( @@ -724,7 +724,7 @@ void test_vsoxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4_m( @@ -733,7 +733,7 @@ void test_vsoxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2_m( @@ -742,7 +742,7 @@ void test_vsoxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, 
size_t vl) { - return vsoxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1_m( @@ -751,7 +751,7 @@ void test_vsoxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2_m( @@ -760,7 +760,7 @@ void test_vsoxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4_m( @@ -769,7 +769,7 @@ void test_vsoxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2_m( @@ -778,7 +778,7 @@ void test_vsoxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg2ei32_v_u32m1_m( @@ -787,7 +787,7 @@ void test_vsoxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2_m( @@ -796,7 +796,7 @@ void test_vsoxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4_m( @@ -805,7 +805,7 @@ void test_vsoxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1_m( @@ -814,7 +814,7 @@ void test_vsoxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2_m( @@ -823,7 +823,7 @@ void test_vsoxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4_m( @@ -832,6 +832,6 @@ void test_vsoxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c index 1d9809c1ffed..3991ba7a1a87 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg2ei64_v_f16mf2(_Float16 *base, 
vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei64_v_f16m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei64_v_f16m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsoxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_f32mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1( @@ -58,7 +58,7 @@ void test_vsoxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei64_v_f32m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2( @@ -67,7 +67,7 @@ void test_vsoxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_f32m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg2ei64_v_f32m4( @@ -76,7 +76,7 @@ void test_vsoxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_f32m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1( @@ -85,7 +85,7 @@ void test_vsoxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei64_v_f64m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2( @@ -94,7 +94,7 @@ void test_vsoxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei64_v_f64m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4( @@ -103,7 +103,7 @@ void test_vsoxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei64_v_f64m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8( @@ -112,7 +112,7 @@ void test_vsoxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei64_v_i8mf8(base, bindex, v0, v1, vl); + return 
__riscv_vsoxseg2ei64_v_i8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4( @@ -121,7 +121,7 @@ void test_vsoxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_i8mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2( @@ -130,7 +130,7 @@ void test_vsoxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_i8mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1( @@ -139,7 +139,7 @@ void test_vsoxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i8m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4( @@ -148,7 +148,7 @@ void test_vsoxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_i16mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2( @@ -157,7 +157,7 @@ void test_vsoxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, 
size_t vl) { - return vsoxseg2ei64_v_i16mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1( @@ -166,7 +166,7 @@ void test_vsoxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i16m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2( @@ -175,7 +175,7 @@ void test_vsoxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i16m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2( @@ -184,7 +184,7 @@ void test_vsoxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_i32mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1( @@ -193,7 +193,7 @@ void test_vsoxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m2( @@ -202,7 +202,7 @@ void test_vsoxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4( @@ -211,7 +211,7 @@ void test_vsoxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1( @@ -220,7 +220,7 @@ void test_vsoxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2( @@ -229,7 +229,7 @@ void test_vsoxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4( @@ -238,7 +238,7 @@ void test_vsoxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8( @@ -247,7 +247,7 @@ void test_vsoxseg2ei64_v_i64m4(int64_t 
*base, vuint64m4_t bindex, vint64m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei64_v_u8mf8(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf4( @@ -256,7 +256,7 @@ void test_vsoxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_u8mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2( @@ -265,7 +265,7 @@ void test_vsoxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_u8mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1( @@ -274,7 +274,7 @@ void test_vsoxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u8m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4( @@ -283,7 +283,7 @@ void test_vsoxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_u16mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg2ei64_v_u16mf2( @@ -292,7 +292,7 @@ void test_vsoxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_u16mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1( @@ -301,7 +301,7 @@ void test_vsoxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u16m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2( @@ -310,7 +310,7 @@ void test_vsoxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei64_v_u16m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2( @@ -319,7 +319,7 @@ void test_vsoxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_u32mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1( @@ -328,7 +328,7 @@ void test_vsoxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m1(base, 
bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2( @@ -337,7 +337,7 @@ void test_vsoxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4( @@ -346,7 +346,7 @@ void test_vsoxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1( @@ -355,7 +355,7 @@ void test_vsoxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u64m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2( @@ -364,7 +364,7 @@ void test_vsoxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei64_v_u64m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4( @@ -373,7 +373,7 @@ void test_vsoxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m4(uint64_t *base, 
vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei64_v_u64m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4_m( @@ -382,7 +382,7 @@ void test_vsoxseg2ei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2_m( @@ -391,7 +391,7 @@ void test_vsoxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_f16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1_m( @@ -400,7 +400,7 @@ void test_vsoxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2_m( @@ -409,7 +409,7 @@ void test_vsoxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei64_v_f16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f16m2_m(mask, 
base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2_m( @@ -418,7 +418,7 @@ void test_vsoxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1_m( @@ -427,7 +427,7 @@ void test_vsoxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2_m( @@ -436,7 +436,7 @@ void test_vsoxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4_m( @@ -445,7 +445,7 @@ void test_vsoxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1_m( @@ -454,7 +454,7 @@ void test_vsoxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, 
// CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2_m( @@ -463,7 +463,7 @@ void test_vsoxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4_m( @@ -472,7 +472,7 @@ void test_vsoxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8_m( @@ -481,7 +481,7 @@ void test_vsoxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4_m( @@ -490,7 +490,7 @@ void test_vsoxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return 
vsoxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2_m( @@ -499,7 +499,7 @@ void test_vsoxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1_m( @@ -508,7 +508,7 @@ void test_vsoxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4_m( @@ -517,7 +517,7 @@ void test_vsoxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2_m( @@ -526,7 +526,7 @@ void test_vsoxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1_m( @@ -535,7 
+535,7 @@ void test_vsoxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2_m( @@ -544,7 +544,7 @@ void test_vsoxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2_m( @@ -553,7 +553,7 @@ void test_vsoxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1_m( @@ -562,7 +562,7 @@ void test_vsoxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m2_m( @@ -571,7 +571,7 @@ void test_vsoxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, 
vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4_m( @@ -580,7 +580,7 @@ void test_vsoxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1_m( @@ -589,7 +589,7 @@ void test_vsoxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2_m( @@ -598,7 +598,7 @@ void test_vsoxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4_m( @@ -607,7 +607,7 @@ void test_vsoxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, 
vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8_m( @@ -616,7 +616,7 @@ void test_vsoxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf4_m( @@ -625,7 +625,7 @@ void test_vsoxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2_m( @@ -634,7 +634,7 @@ void test_vsoxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1_m( @@ -643,7 +643,7 @@ void test_vsoxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4_m( @@ -652,7 +652,7 @@ void test_vsoxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // 
void test_vsoxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf2_m( @@ -661,7 +661,7 @@ void test_vsoxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1_m( @@ -670,7 +670,7 @@ void test_vsoxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2_m( @@ -679,7 +679,7 @@ void test_vsoxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2_m( @@ -688,7 +688,7 @@ void test_vsoxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_u32mf2_m(mask, 
base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1_m( @@ -697,7 +697,7 @@ void test_vsoxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2_m( @@ -706,7 +706,7 @@ void test_vsoxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4_m( @@ -715,7 +715,7 @@ void test_vsoxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1_m( @@ -724,7 +724,7 @@ void test_vsoxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2_m( @@ -733,7 +733,7 @@ void 
test_vsoxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4_m( @@ -742,6 +742,6 @@ void test_vsoxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c index ec279c6ee3ed..97d77fc18ad9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei8_v_f16m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei8_v_f16m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4( @@ -49,7 +49,7 @@ void test_vsoxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsoxseg2ei8_v_f16m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2( @@ -58,7 +58,7 @@ void test_vsoxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_f32mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1( @@ -67,7 +67,7 @@ void test_vsoxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei8_v_f32m1(base, bindex, v0, v1, vl); 
+ return __riscv_vsoxseg2ei8_v_f32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2( @@ -76,7 +76,7 @@ void test_vsoxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei8_v_f32m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4( @@ -85,7 +85,7 @@ void test_vsoxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei8_v_f32m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1( @@ -94,7 +94,7 @@ void test_vsoxseg2ei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_f64m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2( @@ -103,7 +103,7 @@ void test_vsoxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_f64m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4( @@ -112,7 +112,7 @@ void test_vsoxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t 
vl) { - return vsoxseg2ei8_v_f64m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8( @@ -121,7 +121,7 @@ void test_vsoxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf8(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4( @@ -130,7 +130,7 @@ void test_vsoxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2( @@ -139,7 +139,7 @@ void test_vsoxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1( @@ -148,7 +148,7 @@ void test_vsoxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i8m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2( @@ -157,7 +157,7 @@ void test_vsoxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t 
bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i8m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i8m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4( @@ -166,7 +166,7 @@ void test_vsoxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i8m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i8m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4( @@ -175,7 +175,7 @@ void test_vsoxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_i16mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2( @@ -184,7 +184,7 @@ void test_vsoxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_i16mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1( @@ -193,7 +193,7 @@ void test_vsoxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i16m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2( @@ -202,7 +202,7 @@ void test_vsoxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // 
void test_vsoxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i16m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4( @@ -211,7 +211,7 @@ void test_vsoxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i16m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_i32mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1( @@ -229,7 +229,7 @@ void test_vsoxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i32m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2( @@ -238,7 +238,7 @@ void test_vsoxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i32m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m4( @@ -247,7 +247,7 @@ void test_vsoxseg2ei8_v_i32m2(int32_t *base, 
vuint8mf2_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i32m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1( @@ -256,7 +256,7 @@ void test_vsoxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2( @@ -265,7 +265,7 @@ void test_vsoxseg2ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4( @@ -274,7 +274,7 @@ void test_vsoxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8( @@ -283,7 +283,7 @@ void test_vsoxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei8_v_u8mf8(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4( @@ -292,7 
+292,7 @@ void test_vsoxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_u8mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2( @@ -301,7 +301,7 @@ void test_vsoxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_u8mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1( @@ -310,7 +310,7 @@ void test_vsoxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u8m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2( @@ -319,7 +319,7 @@ void test_vsoxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u8m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u8m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4( @@ -328,7 +328,7 @@ void test_vsoxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u8m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u8m4(base, bindex, v0, v1, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4( @@ -337,7 +337,7 @@ void test_vsoxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_u16mf4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2( @@ -346,7 +346,7 @@ void test_vsoxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_u16mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1( @@ -355,7 +355,7 @@ void test_vsoxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u16m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2( @@ -364,7 +364,7 @@ void test_vsoxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u16m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4( @@ -373,7 +373,7 @@ void test_vsoxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u16m4(base, bindex, v0, 
v1, vl); + return __riscv_vsoxseg2ei8_v_u16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2( @@ -382,7 +382,7 @@ void test_vsoxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_u32mf2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m1( @@ -391,7 +391,7 @@ void test_vsoxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u32m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2( @@ -400,7 +400,7 @@ void test_vsoxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u32m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4( @@ -409,7 +409,7 @@ void test_vsoxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u32m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1( @@ -418,7 +418,7 @@ void test_vsoxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, 
vuint64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m1(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2( @@ -427,7 +427,7 @@ void test_vsoxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m2(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4( @@ -436,7 +436,7 @@ void test_vsoxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m4(base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4_m( @@ -445,7 +445,7 @@ void test_vsoxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_f16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1_m( @@ -463,7 +463,7 @@ void 
test_vsoxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2_m( @@ -472,7 +472,7 @@ void test_vsoxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4_m( @@ -481,7 +481,7 @@ void test_vsoxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsoxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2_m( @@ -490,7 +490,7 @@ void test_vsoxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1_m( @@ -499,7 +499,7 @@ void test_vsoxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, 
vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2_m( @@ -508,7 +508,7 @@ void test_vsoxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4_m( @@ -517,7 +517,7 @@ void test_vsoxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1_m( @@ -526,7 +526,7 @@ void test_vsoxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2_m( @@ -535,7 +535,7 @@ void test_vsoxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4_m( @@ -544,7 +544,7 @@ void test_vsoxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8_m( @@ -553,7 +553,7 @@ void test_vsoxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4_m( @@ -562,7 +562,7 @@ void test_vsoxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2_m( @@ -571,7 +571,7 @@ void test_vsoxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1_m( @@ -580,7 +580,7 @@ void test_vsoxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2_m( @@ -589,7 +589,7 @@ void test_vsoxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4_m( @@ -598,7 +598,7 @@ void test_vsoxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4_m( @@ -607,7 +607,7 @@ void test_vsoxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2_m( @@ -616,7 +616,7 @@ void test_vsoxseg2ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl); + return 
__riscv_vsoxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1_m( @@ -625,7 +625,7 @@ void test_vsoxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2_m( @@ -634,7 +634,7 @@ void test_vsoxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4_m( @@ -643,7 +643,7 @@ void test_vsoxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32mf2_m( @@ -652,7 +652,7 @@ void test_vsoxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1_m( @@ -661,7 +661,7 @@ void test_vsoxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, 
vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2_m( @@ -670,7 +670,7 @@ void test_vsoxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m4_m( @@ -679,7 +679,7 @@ void test_vsoxseg2ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1_m( @@ -688,7 +688,7 @@ void test_vsoxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2_m( @@ -697,7 +697,7 @@ void test_vsoxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m2_m(mask, 
base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4_m( @@ -706,7 +706,7 @@ void test_vsoxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8_m( @@ -715,7 +715,7 @@ void test_vsoxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4_m( @@ -724,7 +724,7 @@ void test_vsoxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2_m( @@ -733,7 +733,7 @@ void test_vsoxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1_m( @@ -742,7 +742,7 @@ void 
test_vsoxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2_m( @@ -751,7 +751,7 @@ void test_vsoxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4_m( @@ -760,7 +760,7 @@ void test_vsoxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4_m( @@ -769,7 +769,7 @@ void test_vsoxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2_m( @@ -778,7 +778,7 @@ void test_vsoxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, 
vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1_m( @@ -787,7 +787,7 @@ void test_vsoxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2_m( @@ -796,7 +796,7 @@ void test_vsoxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4_m( @@ -805,7 +805,7 @@ void test_vsoxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2_m( @@ -814,7 +814,7 @@ void test_vsoxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg2ei8_v_u32m1_m( @@ -823,7 +823,7 @@ void test_vsoxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2_m( @@ -832,7 +832,7 @@ void test_vsoxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4_m( @@ -841,7 +841,7 @@ void test_vsoxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1_m( @@ -850,7 +850,7 @@ void test_vsoxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2_m( @@ -859,7 +859,7 @@ void test_vsoxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4_m( @@ -868,6 +868,6 @@ void test_vsoxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg2ei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsoxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c index 8fb0b2eea66a..81a58ebc15ef 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1( @@ -31,7 +31,7 @@ 
void test_vsoxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsoxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_f32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1( @@ -58,7 +58,7 @@ void test_vsoxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei16_v_f32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2( @@ -67,7 +67,7 @@ void test_vsoxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, 
size_t vl) { - return vsoxseg3ei16_v_f32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1( @@ -76,7 +76,7 @@ void test_vsoxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_f64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2( @@ -85,7 +85,7 @@ void test_vsoxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei16_v_f64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsoxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei16_v_i8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsoxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_i8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2( @@ -112,7 +112,7 @@ void 
test_vsoxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_i8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1( @@ -121,7 +121,7 @@ void test_vsoxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2( @@ -130,7 +130,7 @@ void test_vsoxseg3ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i8m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i8m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4( @@ -139,7 +139,7 @@ void test_vsoxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_i16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2( @@ -148,7 +148,7 @@ void test_vsoxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return 
vsoxseg3ei16_v_i16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1( @@ -157,7 +157,7 @@ void test_vsoxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2( @@ -166,7 +166,7 @@ void test_vsoxseg3ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2( @@ -175,7 +175,7 @@ void test_vsoxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_i32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1( @@ -184,7 +184,7 @@ void test_vsoxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2( @@ -193,7 +193,7 @@ void test_vsoxseg3ei16_v_i32m1(int32_t *base, 
vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1( @@ -202,7 +202,7 @@ void test_vsoxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2( @@ -211,7 +211,7 @@ void test_vsoxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8( @@ -220,7 +220,7 @@ void test_vsoxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei16_v_u8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4( @@ -229,7 +229,7 @@ void test_vsoxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_u8mf4(base, bindex, 
v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2( @@ -238,7 +238,7 @@ void test_vsoxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_u8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1( @@ -247,7 +247,7 @@ void test_vsoxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2( @@ -256,7 +256,7 @@ void test_vsoxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u8m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u8m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4( @@ -265,7 +265,7 @@ void test_vsoxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_u16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2( @@ -274,7 +274,7 @@ void test_vsoxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_u16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1( @@ -283,7 +283,7 @@ void test_vsoxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2( @@ -292,7 +292,7 @@ void test_vsoxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2( @@ -301,7 +301,7 @@ void test_vsoxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_u32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1( @@ -310,7 +310,7 @@ void test_vsoxseg3ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u32m1(base, bindex, v0, v1, v2, 
vl); + return __riscv_vsoxseg3ei16_v_u32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2( @@ -319,7 +319,7 @@ void test_vsoxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1( @@ -328,7 +328,7 @@ void test_vsoxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m2( @@ -337,7 +337,7 @@ void test_vsoxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4_m( @@ -346,7 +346,7 @@ void test_vsoxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2_m( @@ -355,7 +355,7 @@ void 
test_vsoxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1_m( @@ -364,7 +364,7 @@ void test_vsoxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2_m( @@ -373,7 +373,7 @@ void test_vsoxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2_m( @@ -382,7 +382,7 @@ void test_vsoxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1_m( @@ -391,7 +391,7 @@ void test_vsoxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, 
vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2_m( @@ -400,7 +400,7 @@ void test_vsoxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1_m( @@ -409,7 +409,7 @@ void test_vsoxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2_m( @@ -418,7 +418,7 @@ void test_vsoxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8_m( @@ -427,7 +427,7 @@ void test_vsoxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4_m( @@ -436,7 +436,7 @@ void test_vsoxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2_m( @@ -445,7 +445,7 @@ void test_vsoxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1_m( @@ -454,7 +454,7 @@ void test_vsoxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2_m( @@ -463,7 +463,7 @@ void test_vsoxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t 
v2, size_t vl) { - return vsoxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4_m( @@ -472,7 +472,7 @@ void test_vsoxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2_m( @@ -481,7 +481,7 @@ void test_vsoxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1_m( @@ -490,7 +490,7 @@ void test_vsoxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2_m( @@ -499,7 +499,7 @@ void test_vsoxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); + 
return __riscv_vsoxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2_m( @@ -508,7 +508,7 @@ void test_vsoxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1_m( @@ -517,7 +517,7 @@ void test_vsoxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2_m( @@ -526,7 +526,7 @@ void test_vsoxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1_m( @@ -535,7 +535,7 @@ void test_vsoxseg3ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2_m( @@ -544,7 +544,7 @@ void test_vsoxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8_m( @@ -553,7 +553,7 @@ void test_vsoxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4_m( @@ -562,7 +562,7 @@ void test_vsoxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2_m( @@ -571,7 +571,7 @@ void test_vsoxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1_m( @@ -580,7 +580,7 @@ void 
test_vsoxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2_m( @@ -589,7 +589,7 @@ void test_vsoxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4_m( @@ -598,7 +598,7 @@ void test_vsoxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2_m( @@ -607,7 +607,7 @@ void test_vsoxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1_m( @@ -616,7 +616,7 @@ void test_vsoxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2_m( @@ -625,7 +625,7 @@ void test_vsoxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2_m( @@ -634,7 +634,7 @@ void test_vsoxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1_m( @@ -643,7 +643,7 @@ void test_vsoxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2_m( @@ -652,7 +652,7 @@ void test_vsoxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u32m2_m(vbool16_t mask, 
uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1_m( @@ -661,7 +661,7 @@ void test_vsoxseg3ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m2_m( @@ -670,6 +670,6 @@ void test_vsoxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c index ad3b83c3fccc..5240cd16ada0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg3ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsoxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_f32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1( @@ -58,7 +58,7 @@ void test_vsoxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m1(float *base, vuint32m1_t 
bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_f32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2( @@ -67,7 +67,7 @@ void test_vsoxseg3ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei32_v_f32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1( @@ -76,7 +76,7 @@ void test_vsoxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei32_v_f64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2( @@ -85,7 +85,7 @@ void test_vsoxseg3ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei32_v_f64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsoxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei32_v_i8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg3ei32_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsoxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_i8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsoxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_i8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1( @@ -121,7 +121,7 @@ void test_vsoxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2( @@ -130,7 +130,7 @@ void test_vsoxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i8m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i8m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4( @@ -139,7 +139,7 @@ void test_vsoxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, 
vint16mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_i16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2( @@ -148,7 +148,7 @@ void test_vsoxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_i16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1( @@ -157,7 +157,7 @@ void test_vsoxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2( @@ -166,7 +166,7 @@ void test_vsoxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2( @@ -175,7 +175,7 @@ void test_vsoxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_i32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1( @@ -184,7 +184,7 @@ 
void test_vsoxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2( @@ -193,7 +193,7 @@ void test_vsoxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1( @@ -202,7 +202,7 @@ void test_vsoxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2( @@ -211,7 +211,7 @@ void test_vsoxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8( @@ -220,7 +220,7 @@ void test_vsoxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - 
return vsoxseg3ei32_v_u8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4( @@ -229,7 +229,7 @@ void test_vsoxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_u8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2( @@ -238,7 +238,7 @@ void test_vsoxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_u8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1( @@ -247,7 +247,7 @@ void test_vsoxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2( @@ -256,7 +256,7 @@ void test_vsoxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u8m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u8m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4( @@ -265,7 +265,7 @@ void test_vsoxseg3ei32_v_u8m2(uint8_t *base, 
vuint32m8_t bindex, vuint8m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_u16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2( @@ -274,7 +274,7 @@ void test_vsoxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_u16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1( @@ -283,7 +283,7 @@ void test_vsoxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2( @@ -292,7 +292,7 @@ void test_vsoxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2( @@ -301,7 +301,7 @@ void test_vsoxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return 
vsoxseg3ei32_v_u32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1( @@ -310,7 +310,7 @@ void test_vsoxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2( @@ -319,7 +319,7 @@ void test_vsoxseg3ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1( @@ -328,7 +328,7 @@ void test_vsoxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m2( @@ -337,7 +337,7 @@ void test_vsoxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4_m( @@ -346,7 +346,7 @@ void 
test_vsoxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2_m( @@ -355,7 +355,7 @@ void test_vsoxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1_m( @@ -364,7 +364,7 @@ void test_vsoxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2_m( @@ -373,7 +373,7 @@ void test_vsoxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2_m( @@ -382,7 +382,7 @@ void test_vsoxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, 
vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1_m( @@ -391,7 +391,7 @@ void test_vsoxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2_m( @@ -400,7 +400,7 @@ void test_vsoxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1_m( @@ -409,7 +409,7 @@ void test_vsoxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2_m( @@ -418,7 +418,7 @@ void test_vsoxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8_m( @@ -427,7 +427,7 @@ void test_vsoxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf4_m( @@ -436,7 +436,7 @@ void test_vsoxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2_m( @@ -445,7 +445,7 @@ void test_vsoxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1_m( @@ -454,7 +454,7 @@ void test_vsoxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t 
v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2_m( @@ -463,7 +463,7 @@ void test_vsoxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4_m( @@ -472,7 +472,7 @@ void test_vsoxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2_m( @@ -481,7 +481,7 @@ void test_vsoxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1_m( @@ -490,7 +490,7 @@ void test_vsoxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); 
+ return __riscv_vsoxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2_m( @@ -499,7 +499,7 @@ void test_vsoxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2_m( @@ -508,7 +508,7 @@ void test_vsoxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1_m( @@ -517,7 +517,7 @@ void test_vsoxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2_m( @@ -526,7 +526,7 @@ void test_vsoxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1_m( @@ -535,7 +535,7 @@ void test_vsoxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2_m( @@ -544,7 +544,7 @@ void test_vsoxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8_m( @@ -553,7 +553,7 @@ void test_vsoxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4_m( @@ -562,7 +562,7 @@ void test_vsoxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2_m( @@ -571,7 +571,7 @@ void 
test_vsoxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1_m( @@ -580,7 +580,7 @@ void test_vsoxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2_m( @@ -589,7 +589,7 @@ void test_vsoxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4_m( @@ -598,7 +598,7 @@ void test_vsoxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2_m( @@ -607,7 +607,7 @@ void test_vsoxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: 
ret void // void test_vsoxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1_m( @@ -616,7 +616,7 @@ void test_vsoxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2_m( @@ -625,7 +625,7 @@ void test_vsoxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2_m( @@ -634,7 +634,7 @@ void test_vsoxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1_m( @@ -643,7 +643,7 @@ void test_vsoxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t 
*base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2_m( @@ -652,7 +652,7 @@ void test_vsoxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1_m( @@ -661,7 +661,7 @@ void test_vsoxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m2_m( @@ -670,6 +670,6 @@ void test_vsoxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c index 33525da434f7..c33354f3f30f 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsoxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t 
// CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_f32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1( @@ -58,7 +58,7 @@ void test_vsoxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei64_v_f32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2( @@ -67,7 +67,7 @@ void test_vsoxseg3ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_f32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1( @@ -76,7 +76,7 @@ void test_vsoxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei64_v_f64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2( @@ -85,7 +85,7 @@ void test_vsoxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei64_v_f64m2(base, bindex, v0, v1, v2, vl); + return 
__riscv_vsoxseg3ei64_v_f64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsoxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei64_v_i8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsoxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_i8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsoxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_i8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1( @@ -121,7 +121,7 @@ void test_vsoxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4( @@ -130,7 +130,7 @@ void test_vsoxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_i16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2( @@ -139,7 +139,7 @@ void test_vsoxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_i16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1( @@ -148,7 +148,7 @@ void test_vsoxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2( @@ -157,7 +157,7 @@ void test_vsoxseg3ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2( @@ -166,7 +166,7 @@ void test_vsoxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_i32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i32mf2(base, 
bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1( @@ -175,7 +175,7 @@ void test_vsoxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2( @@ -184,7 +184,7 @@ void test_vsoxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1( @@ -193,7 +193,7 @@ void test_vsoxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2( @@ -202,7 +202,7 @@ void test_vsoxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8( @@ -211,7 +211,7 @@ void test_vsoxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf8(uint8_t 
*base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei64_v_u8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4( @@ -220,7 +220,7 @@ void test_vsoxseg3ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_u8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2( @@ -229,7 +229,7 @@ void test_vsoxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_u8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1( @@ -238,7 +238,7 @@ void test_vsoxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4( @@ -247,7 +247,7 @@ void test_vsoxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_u16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u16mf4(base, bindex, v0, v1, v2, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2( @@ -256,7 +256,7 @@ void test_vsoxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_u16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1( @@ -265,7 +265,7 @@ void test_vsoxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2( @@ -274,7 +274,7 @@ void test_vsoxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei64_v_u16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2( @@ -283,7 +283,7 @@ void test_vsoxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_u32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1( @@ -292,7 +292,7 @@ void test_vsoxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2( @@ -301,7 +301,7 @@ void test_vsoxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_u32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1( @@ -310,7 +310,7 @@ void test_vsoxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2( @@ -319,7 +319,7 @@ void test_vsoxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei64_v_u64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4_m( @@ -328,7 +328,7 @@ void test_vsoxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return 
__riscv_vsoxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2_m( @@ -337,7 +337,7 @@ void test_vsoxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1_m( @@ -346,7 +346,7 @@ void test_vsoxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2_m( @@ -355,7 +355,7 @@ void test_vsoxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, 
vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2_m( @@ -382,7 +382,7 @@ void test_vsoxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1_m( @@ -391,7 +391,7 @@ void test_vsoxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2_m( @@ -400,7 +400,7 @@ void test_vsoxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8_m( @@ -409,7 +409,7 @@ void 
test_vsoxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4_m( @@ -418,7 +418,7 @@ void test_vsoxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2_m( @@ -427,7 +427,7 @@ void test_vsoxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1_m( @@ -436,7 +436,7 @@ void test_vsoxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4_m( @@ -445,7 +445,7 @@ void test_vsoxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2_m( @@ -472,7 +472,7 @@ void test_vsoxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2_m( @@ -481,7 +481,7 @@ void test_vsoxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, 
vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1_m( @@ -490,7 +490,7 @@ void test_vsoxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2_m( @@ -499,7 +499,7 @@ void test_vsoxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1_m( @@ -508,7 +508,7 @@ void test_vsoxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2_m( @@ -517,7 +517,7 @@ void test_vsoxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i64m2_m(mask, 
base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8_m( @@ -526,7 +526,7 @@ void test_vsoxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4_m( @@ -535,7 +535,7 @@ void test_vsoxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2_m( @@ -544,7 +544,7 @@ void test_vsoxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1_m( @@ -553,7 +553,7 @@ void test_vsoxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, 
v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4_m( @@ -562,7 +562,7 @@ void test_vsoxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2_m( @@ -571,7 +571,7 @@ void test_vsoxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1_m( @@ -580,7 +580,7 @@ void test_vsoxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2_m( @@ -589,7 +589,7 @@ void test_vsoxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2_m( @@ -598,7 
+598,7 @@ void test_vsoxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1_m( @@ -607,7 +607,7 @@ void test_vsoxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2_m( @@ -616,7 +616,7 @@ void test_vsoxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1_m( @@ -625,7 +625,7 @@ void test_vsoxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2_m( @@ -634,6 +634,6 @@ void test_vsoxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, 
vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c index 31f1a2615261..148c76692b0f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg3ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsoxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_f32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1( @@ -58,7 +58,7 @@ void test_vsoxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei8_v_f32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2( @@ -67,7 +67,7 @@ void test_vsoxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei8_v_f32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1( @@ -76,7 +76,7 @@ void test_vsoxseg3ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, 
vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_f64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2( @@ -85,7 +85,7 @@ void test_vsoxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_f64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsoxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsoxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsoxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1( @@ -121,7 +121,7 @@ void 
test_vsoxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2( @@ -130,7 +130,7 @@ void test_vsoxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i8m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i8m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4( @@ -139,7 +139,7 @@ void test_vsoxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_i16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf2( @@ -148,7 +148,7 @@ void test_vsoxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_i16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1( @@ -157,7 +157,7 @@ void test_vsoxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i16m1(base, 
bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2( @@ -166,7 +166,7 @@ void test_vsoxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2( @@ -175,7 +175,7 @@ void test_vsoxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_i32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1( @@ -184,7 +184,7 @@ void test_vsoxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2( @@ -193,7 +193,7 @@ void test_vsoxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1( @@ -202,7 +202,7 @@ void test_vsoxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2( @@ -211,7 +211,7 @@ void test_vsoxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8( @@ -220,7 +220,7 @@ void test_vsoxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei8_v_u8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4( @@ -229,7 +229,7 @@ void test_vsoxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_u8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2( @@ -238,7 +238,7 @@ void test_vsoxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_u8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u8mf2(base, 
bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1( @@ -247,7 +247,7 @@ void test_vsoxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2( @@ -256,7 +256,7 @@ void test_vsoxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u8m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u8m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf4( @@ -265,7 +265,7 @@ void test_vsoxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_u16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2( @@ -274,7 +274,7 @@ void test_vsoxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_u16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1( @@ -283,7 +283,7 @@ void test_vsoxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m1(uint16_t *base, 
vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2( @@ -292,7 +292,7 @@ void test_vsoxseg3ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2( @@ -301,7 +301,7 @@ void test_vsoxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_u32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1( @@ -310,7 +310,7 @@ void test_vsoxseg3ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2( @@ -319,7 +319,7 @@ void test_vsoxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg3ei8_v_u64m1( @@ -328,7 +328,7 @@ void test_vsoxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2( @@ -337,7 +337,7 @@ void test_vsoxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4_m( @@ -346,7 +346,7 @@ void test_vsoxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2_m( @@ -355,7 +355,7 @@ void test_vsoxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1_m( @@ -364,7 +364,7 @@ void test_vsoxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret 
void // void test_vsoxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2_m( @@ -373,7 +373,7 @@ void test_vsoxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2_m( @@ -382,7 +382,7 @@ void test_vsoxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1_m( @@ -391,7 +391,7 @@ void test_vsoxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2_m( @@ -400,7 +400,7 @@ void test_vsoxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, 
vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1_m( @@ -409,7 +409,7 @@ void test_vsoxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2_m( @@ -418,7 +418,7 @@ void test_vsoxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8_m( @@ -427,7 +427,7 @@ void test_vsoxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4_m( @@ -436,7 +436,7 @@ void test_vsoxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf4_m(mask, base, 
bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2_m( @@ -445,7 +445,7 @@ void test_vsoxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1_m( @@ -454,7 +454,7 @@ void test_vsoxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2_m( @@ -463,7 +463,7 @@ void test_vsoxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4_m( @@ -472,7 +472,7 @@ void test_vsoxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg3ei8_v_i16mf2_m( @@ -481,7 +481,7 @@ void test_vsoxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1_m( @@ -490,7 +490,7 @@ void test_vsoxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2_m( @@ -499,7 +499,7 @@ void test_vsoxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2_m( @@ -508,7 +508,7 @@ void test_vsoxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1_m( @@ -517,7 +517,7 @@ void test_vsoxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t 
*base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2_m( @@ -526,7 +526,7 @@ void test_vsoxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1_m( @@ -535,7 +535,7 @@ void test_vsoxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2_m( @@ -544,7 +544,7 @@ void test_vsoxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8_m( @@ -553,7 +553,7 @@ void test_vsoxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, 
vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4_m( @@ -562,7 +562,7 @@ void test_vsoxseg3ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2_m( @@ -571,7 +571,7 @@ void test_vsoxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1_m( @@ -580,7 +580,7 @@ void test_vsoxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2_m( @@ -589,7 +589,7 @@ void test_vsoxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u8m2_m(mask, base, 
bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf4_m( @@ -598,7 +598,7 @@ void test_vsoxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2_m( @@ -607,7 +607,7 @@ void test_vsoxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1_m( @@ -616,7 +616,7 @@ void test_vsoxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2_m( @@ -625,7 +625,7 @@ void test_vsoxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, 
v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2_m( @@ -634,7 +634,7 @@ void test_vsoxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1_m( @@ -643,7 +643,7 @@ void test_vsoxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2_m( @@ -652,7 +652,7 @@ void test_vsoxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m1_m( @@ -661,7 +661,7 @@ void test_vsoxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2_m( @@ -670,6 +670,6 @@ void 
test_vsoxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg3ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsoxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c index e33ec24cd6f2..214eeb21a033 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - 
return vsoxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsoxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1( @@ -58,7 +58,7 @@ void test_vsoxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei16_v_f32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2( @@ -67,7 +67,7 @@ void test_vsoxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei16_v_f32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f32m2(base, bindex, v0, v1, v2, 
v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1( @@ -76,7 +76,7 @@ void test_vsoxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_f64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2( @@ -85,7 +85,7 @@ void test_vsoxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei16_v_f64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsoxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsoxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsoxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, 
vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1( @@ -121,7 +121,7 @@ void test_vsoxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2( @@ -130,7 +130,7 @@ void test_vsoxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i8m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i8m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4( @@ -139,7 +139,7 @@ void test_vsoxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2( @@ -148,7 +148,7 @@ void test_vsoxseg4ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, 
vint16mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1( @@ -157,7 +157,7 @@ void test_vsoxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2( @@ -166,7 +166,7 @@ void test_vsoxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2( @@ -175,7 +175,7 @@ void test_vsoxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m1( @@ -184,7 +184,7 @@ void test_vsoxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i32m1(base, bindex, 
v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2( @@ -193,7 +193,7 @@ void test_vsoxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1( @@ -202,7 +202,7 @@ void test_vsoxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2( @@ -211,7 +211,7 @@ void test_vsoxseg4ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8( @@ -220,7 +220,7 @@ void test_vsoxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4( @@ -229,7 +229,7 @@ void test_vsoxseg4ei16_v_u8mf8(uint8_t *base, 
vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2( @@ -238,7 +238,7 @@ void test_vsoxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1( @@ -247,7 +247,7 @@ void test_vsoxseg4ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2( @@ -256,7 +256,7 @@ void test_vsoxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u8m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u8m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4( @@ -265,7 +265,7 @@ void test_vsoxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, 
vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2( @@ -274,7 +274,7 @@ void test_vsoxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1( @@ -283,7 +283,7 @@ void test_vsoxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2( @@ -292,7 +292,7 @@ void test_vsoxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2( @@ -301,7 +301,7 @@ void test_vsoxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_u32mf2(base, bindex, v0, v1, 
v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1( @@ -310,7 +310,7 @@ void test_vsoxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2( @@ -319,7 +319,7 @@ void test_vsoxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1( @@ -328,7 +328,7 @@ void test_vsoxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m2( @@ -337,7 +337,7 @@ void test_vsoxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg4ei16_v_f16mf4_m( @@ -346,7 +346,7 @@ void test_vsoxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2_m( @@ -355,7 +355,7 @@ void test_vsoxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1_m( @@ -364,7 +364,7 @@ void test_vsoxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsoxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2_m( @@ -373,7 +373,7 @@ void test_vsoxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f16m2_m(mask, base, bindex, v0, 
v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2_m( @@ -382,7 +382,7 @@ void test_vsoxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1_m( @@ -391,7 +391,7 @@ void test_vsoxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2_m( @@ -400,7 +400,7 @@ void test_vsoxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1_m( @@ -409,7 +409,7 @@ void test_vsoxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsoxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2_m( @@ -418,7 +418,7 @@ void test_vsoxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8_m( @@ -427,7 +427,7 @@ void test_vsoxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4_m( @@ -436,7 +436,7 @@ void test_vsoxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2_m( @@ -445,7 +445,7 @@ void test_vsoxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsoxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1_m( @@ -454,7 +454,7 @@ void test_vsoxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2_m( @@ -463,7 +463,7 @@ void test_vsoxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4_m( @@ -472,7 +472,7 @@ void test_vsoxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2_m( @@ -481,7 +481,7 @@ void test_vsoxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsoxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1_m( @@ -490,7 +490,7 @@ void test_vsoxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2_m( @@ -499,7 +499,7 @@ void test_vsoxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2_m( @@ -508,7 +508,7 @@ void test_vsoxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m1_m( @@ -517,7 +517,7 @@ void test_vsoxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsoxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2_m( @@ -526,7 +526,7 @@ void test_vsoxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1_m( @@ -535,7 +535,7 @@ void test_vsoxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2_m( @@ -544,7 +544,7 @@ void test_vsoxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8_m( @@ -553,7 +553,7 @@ void test_vsoxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsoxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4_m( @@ -562,7 +562,7 @@ void test_vsoxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2_m( @@ -571,7 +571,7 @@ void test_vsoxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1_m( @@ -580,7 +580,7 @@ void test_vsoxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2_m( @@ -589,7 +589,7 @@ void test_vsoxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsoxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4_m( @@ -598,7 +598,7 @@ void test_vsoxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2_m( @@ -607,7 +607,7 @@ void test_vsoxseg4ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1_m( @@ -616,7 +616,7 @@ void test_vsoxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2_m( @@ -625,7 +625,7 @@ void test_vsoxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u16m2_m(mask, base, bindex, v0, 
v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2_m( @@ -634,7 +634,7 @@ void test_vsoxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1_m( @@ -643,7 +643,7 @@ void test_vsoxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2_m( @@ -652,7 +652,7 @@ void test_vsoxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1_m( @@ -661,7 +661,7 @@ void test_vsoxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return 
vsoxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m2_m( @@ -670,6 +670,6 @@ void test_vsoxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c index 4cb123eb7907..c09daf717bef 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1( @@ -31,7 +31,7 @@ void 
test_vsoxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsoxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1( @@ -58,7 +58,7 @@ void test_vsoxseg4ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2( @@ -67,7 +67,7 @@ void test_vsoxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei32_v_f32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1( @@ -76,7 +76,7 @@ void test_vsoxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2( @@ -85,7 +85,7 @@ void test_vsoxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei32_v_f64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsoxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsoxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - 
return vsoxseg4ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsoxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1( @@ -121,7 +121,7 @@ void test_vsoxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2( @@ -130,7 +130,7 @@ void test_vsoxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i8m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i8m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4( @@ -139,7 +139,7 @@ void test_vsoxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg4ei32_v_i16mf2( @@ -148,7 +148,7 @@ void test_vsoxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1( @@ -157,7 +157,7 @@ void test_vsoxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m2( @@ -166,7 +166,7 @@ void test_vsoxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2( @@ -175,7 +175,7 @@ void test_vsoxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1( @@ -184,7 +184,7 @@ void test_vsoxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2( @@ -193,7 +193,7 @@ void test_vsoxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1( @@ -202,7 +202,7 @@ void test_vsoxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2( @@ -211,7 +211,7 @@ void test_vsoxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8( @@ -220,7 +220,7 @@ void test_vsoxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, 
vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf4( @@ -229,7 +229,7 @@ void test_vsoxseg4ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2( @@ -238,7 +238,7 @@ void test_vsoxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1( @@ -247,7 +247,7 @@ void test_vsoxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2( @@ -256,7 +256,7 @@ void test_vsoxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u8m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u8m2(base, bindex, v0, v1, 
v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4( @@ -265,7 +265,7 @@ void test_vsoxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2( @@ -274,7 +274,7 @@ void test_vsoxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1( @@ -283,7 +283,7 @@ void test_vsoxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m2( @@ -292,7 +292,7 @@ void test_vsoxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2( @@ -301,7 +301,7 @@ void 
test_vsoxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1( @@ -310,7 +310,7 @@ void test_vsoxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2( @@ -319,7 +319,7 @@ void test_vsoxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m1( @@ -328,7 +328,7 @@ void test_vsoxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2( @@ -337,7 +337,7 @@ void test_vsoxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4_m( @@ -346,7 +346,7 @@ void test_vsoxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2_m( @@ -355,7 +355,7 @@ void test_vsoxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1_m( @@ -364,7 +364,7 @@ void test_vsoxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2_m( @@ -373,7 +373,7 @@ void test_vsoxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // 
void test_vsoxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2_m( @@ -382,7 +382,7 @@ void test_vsoxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1_m( @@ -391,7 +391,7 @@ void test_vsoxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2_m( @@ -400,7 +400,7 @@ void test_vsoxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1_m( @@ -409,7 +409,7 @@ void test_vsoxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2_m( @@ -418,7 +418,7 @@ void test_vsoxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8_m( @@ -427,7 +427,7 @@ void test_vsoxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf4_m( @@ -436,7 +436,7 @@ void test_vsoxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2_m( @@ -445,7 +445,7 @@ void test_vsoxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde 
// CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1_m( @@ -454,7 +454,7 @@ void test_vsoxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2_m( @@ -463,7 +463,7 @@ void test_vsoxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4_m( @@ -472,7 +472,7 @@ void test_vsoxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf2_m( @@ -481,7 +481,7 @@ void test_vsoxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: 
ret void // void test_vsoxseg4ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1_m( @@ -490,7 +490,7 @@ void test_vsoxseg4ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m2_m( @@ -499,7 +499,7 @@ void test_vsoxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2_m( @@ -508,7 +508,7 @@ void test_vsoxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1_m( @@ -517,7 +517,7 @@ void test_vsoxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2_m( @@ -526,7 +526,7 @@ void test_vsoxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1_m( @@ -535,7 +535,7 @@ void test_vsoxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2_m( @@ -544,7 +544,7 @@ void test_vsoxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8_m( @@ -553,7 +553,7 @@ void test_vsoxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bind // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf4_m( @@ -562,7 +562,7 @@ void test_vsoxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2_m( @@ -571,7 +571,7 @@ void test_vsoxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1_m( @@ -580,7 +580,7 @@ void test_vsoxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2_m( @@ -589,7 +589,7 @@ void test_vsoxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4_m( @@ -598,7 +598,7 @@ void test_vsoxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2_m( @@ -607,7 +607,7 @@ void test_vsoxseg4ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1_m( @@ -616,7 +616,7 @@ void test_vsoxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m2_m( @@ -625,7 +625,7 @@ void test_vsoxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, 
vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2_m( @@ -634,7 +634,7 @@ void test_vsoxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1_m( @@ -643,7 +643,7 @@ void test_vsoxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2_m( @@ -652,7 +652,7 @@ void test_vsoxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m1_m( @@ -661,7 +661,7 @@ void test_vsoxseg4ei32_v_u32m2_m(vbool16_t mask, 
uint32_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2_m( @@ -670,6 +670,6 @@ void test_vsoxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c index 51c81ab43f45..598c8b93184b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, 
size_t vl) { - return vsoxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsoxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsoxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1( @@ -58,7 +58,7 @@ void test_vsoxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_f32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f32m1(base, 
bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2( @@ -67,7 +67,7 @@ void test_vsoxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_f32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1( @@ -76,7 +76,7 @@ void test_vsoxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei64_v_f64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2( @@ -85,7 +85,7 @@ void test_vsoxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei64_v_f64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsoxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsoxseg4ei64_v_i8mf8(int8_t *base, 
vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsoxseg4ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1( @@ -121,7 +121,7 @@ void test_vsoxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4( @@ -130,7 +130,7 @@ void test_vsoxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2( @@ -139,7 +139,7 @@ void test_vsoxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, 
vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1( @@ -148,7 +148,7 @@ void test_vsoxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2( @@ -157,7 +157,7 @@ void test_vsoxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei64_v_i16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2( @@ -166,7 +166,7 @@ void test_vsoxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1( @@ -175,7 +175,7 @@ void test_vsoxseg4ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i32m1(base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsoxseg4ei64_v_i32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2( @@ -184,7 +184,7 @@ void test_vsoxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_i32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1( @@ -193,7 +193,7 @@ void test_vsoxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2( @@ -202,7 +202,7 @@ void test_vsoxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei64_v_i64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8( @@ -211,7 +211,7 @@ void test_vsoxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4( @@ -220,7 +220,7 @@ void 
test_vsoxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2( @@ -229,7 +229,7 @@ void test_vsoxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1( @@ -238,7 +238,7 @@ void test_vsoxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4( @@ -247,7 +247,7 @@ void test_vsoxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2( @@ -256,7 +256,7 @@ void test_vsoxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1( @@ -265,7 +265,7 @@ void test_vsoxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m2( @@ -274,7 +274,7 @@ void test_vsoxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei64_v_u16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2( @@ -283,7 +283,7 @@ void test_vsoxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1( @@ -292,7 +292,7 @@ void test_vsoxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, 
vuint32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2( @@ -301,7 +301,7 @@ void test_vsoxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_u32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1( @@ -310,7 +310,7 @@ void test_vsoxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2( @@ -319,7 +319,7 @@ void test_vsoxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei64_v_u64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4_m( @@ -328,7 +328,7 @@ void test_vsoxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + 
return __riscv_vsoxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2_m( @@ -337,7 +337,7 @@ void test_vsoxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1_m( @@ -346,7 +346,7 @@ void test_vsoxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsoxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2_m( @@ -355,7 +355,7 @@ void test_vsoxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_f32mf2_m(mask, 
base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2_m( @@ -382,7 +382,7 @@ void test_vsoxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1_m( @@ -391,7 +391,7 @@ void test_vsoxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2_m( @@ -400,7 +400,7 @@ void test_vsoxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return 
vsoxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8_m( @@ -409,7 +409,7 @@ void test_vsoxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4_m( @@ -418,7 +418,7 @@ void test_vsoxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2_m( @@ -427,7 +427,7 @@ void test_vsoxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1_m( @@ -436,7 +436,7 @@ void test_vsoxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return 
vsoxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4_m( @@ -445,7 +445,7 @@ void test_vsoxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2_m( @@ -472,7 +472,7 @@ void test_vsoxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - 
return vsoxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2_m( @@ -481,7 +481,7 @@ void test_vsoxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1_m( @@ -490,7 +490,7 @@ void test_vsoxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2_m( @@ -499,7 +499,7 @@ void test_vsoxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1_m( @@ -508,7 +508,7 @@ void test_vsoxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - 
return vsoxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2_m( @@ -517,7 +517,7 @@ void test_vsoxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8_m( @@ -526,7 +526,7 @@ void test_vsoxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4_m( @@ -535,7 +535,7 @@ void test_vsoxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2_m( @@ -544,7 +544,7 @@ void test_vsoxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - 
return vsoxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1_m( @@ -553,7 +553,7 @@ void test_vsoxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4_m( @@ -562,7 +562,7 @@ void test_vsoxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2_m( @@ -571,7 +571,7 @@ void test_vsoxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1_m( @@ -580,7 +580,7 @@ void test_vsoxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, 
size_t vl) { - return vsoxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m2_m( @@ -589,7 +589,7 @@ void test_vsoxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2_m( @@ -598,7 +598,7 @@ void test_vsoxseg4ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1_m( @@ -607,7 +607,7 @@ void test_vsoxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2_m( @@ -616,7 +616,7 @@ void test_vsoxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, 
vuint32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1_m( @@ -625,7 +625,7 @@ void test_vsoxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2_m( @@ -634,6 +634,6 @@ void test_vsoxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c index ed5a32881bd8..f7c77ba59a7c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg4ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg4ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsoxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2( @@ -40,7 +40,7 @@ void test_vsoxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsoxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1( @@ -58,7 +58,7 @@ void test_vsoxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei8_v_f32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2( @@ -67,7 +67,7 @@ void test_vsoxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei8_v_f32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1( @@ -76,7 +76,7 @@ void test_vsoxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_f64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2( @@ -85,7 +85,7 @@ void test_vsoxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei8_v_f64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsoxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, 
size_t vl) { - return vsoxseg4ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsoxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsoxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1( @@ -121,7 +121,7 @@ void test_vsoxseg4ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2( @@ -130,7 +130,7 @@ void test_vsoxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i8m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i8m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg4ei8_v_i16mf4( @@ -139,7 +139,7 @@ void test_vsoxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2( @@ -148,7 +148,7 @@ void test_vsoxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1( @@ -157,7 +157,7 @@ void test_vsoxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2( @@ -166,7 +166,7 @@ void test_vsoxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2( @@ -175,7 +175,7 @@ void test_vsoxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, v // CHECK-RV64-NEXT: ret void 
// void test_vsoxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1( @@ -184,7 +184,7 @@ void test_vsoxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2( @@ -193,7 +193,7 @@ void test_vsoxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1( @@ -202,7 +202,7 @@ void test_vsoxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2( @@ -211,7 +211,7 @@ void test_vsoxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return 
vsoxseg4ei8_v_i64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8( @@ -220,7 +220,7 @@ void test_vsoxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf4( @@ -229,7 +229,7 @@ void test_vsoxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2( @@ -238,7 +238,7 @@ void test_vsoxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1( @@ -247,7 +247,7 @@ void test_vsoxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg4ei8_v_u8m2( @@ -256,7 +256,7 @@ void test_vsoxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u8m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u8m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4( @@ -265,7 +265,7 @@ void test_vsoxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2( @@ -274,7 +274,7 @@ void test_vsoxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m1( @@ -283,7 +283,7 @@ void test_vsoxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2( @@ -292,7 +292,7 @@ void test_vsoxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: 
ret void // void test_vsoxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2( @@ -301,7 +301,7 @@ void test_vsoxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1( @@ -310,7 +310,7 @@ void test_vsoxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2( @@ -319,7 +319,7 @@ void test_vsoxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1( @@ -328,7 +328,7 @@ void test_vsoxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, 
size_t vl) { - return vsoxseg4ei8_v_u64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2( @@ -337,7 +337,7 @@ void test_vsoxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4_m( @@ -346,7 +346,7 @@ void test_vsoxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2_m( @@ -355,7 +355,7 @@ void test_vsoxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1_m( @@ -364,7 +364,7 @@ void test_vsoxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return 
vsoxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2_m( @@ -373,7 +373,7 @@ void test_vsoxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2_m( @@ -382,7 +382,7 @@ void test_vsoxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1_m( @@ -391,7 +391,7 @@ void test_vsoxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2_m( @@ -400,7 +400,7 @@ void test_vsoxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - 
return vsoxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1_m( @@ -409,7 +409,7 @@ void test_vsoxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2_m( @@ -418,7 +418,7 @@ void test_vsoxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8_m( @@ -427,7 +427,7 @@ void test_vsoxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf4_m( @@ -436,7 +436,7 @@ void test_vsoxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return 
vsoxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2_m( @@ -445,7 +445,7 @@ void test_vsoxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1_m( @@ -454,7 +454,7 @@ void test_vsoxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2_m( @@ -463,7 +463,7 @@ void test_vsoxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf4_m( @@ -472,7 +472,7 @@ void test_vsoxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_i16mf4_m(mask, base, bindex, 
v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2_m( @@ -481,7 +481,7 @@ void test_vsoxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1_m( @@ -490,7 +490,7 @@ void test_vsoxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2_m( @@ -499,7 +499,7 @@ void test_vsoxseg4ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2_m( @@ -508,7 +508,7 @@ void test_vsoxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, 
v3, vl); + return __riscv_vsoxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1_m( @@ -517,7 +517,7 @@ void test_vsoxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2_m( @@ -526,7 +526,7 @@ void test_vsoxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1_m( @@ -535,7 +535,7 @@ void test_vsoxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2_m( @@ -544,7 +544,7 @@ void test_vsoxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsoxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8_m( @@ -553,7 +553,7 @@ void test_vsoxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf4_m( @@ -562,7 +562,7 @@ void test_vsoxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2_m( @@ -571,7 +571,7 @@ void test_vsoxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1_m( @@ -580,7 +580,7 @@ void test_vsoxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsoxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m2_m( @@ -589,7 +589,7 @@ void test_vsoxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4_m( @@ -598,7 +598,7 @@ void test_vsoxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2_m( @@ -607,7 +607,7 @@ void test_vsoxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m1_m( @@ -616,7 +616,7 @@ void test_vsoxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsoxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2_m( @@ -625,7 +625,7 @@ void test_vsoxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2_m( @@ -634,7 +634,7 @@ void test_vsoxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1_m( @@ -643,7 +643,7 @@ void test_vsoxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2_m( @@ -652,7 +652,7 @@ void test_vsoxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsoxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1_m( @@ -661,7 +661,7 @@ void test_vsoxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2_m( @@ -670,6 +670,6 @@ void test_vsoxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg4ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsoxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c index 2121fde0481f..fb394120b013 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg5ei16_v_f16mf4(_Float16 
*base, vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg5ei16_v_f32m1(float *base, vuint16mf2_t 
bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg5ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32mf2(int32_t *base, 
vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg5ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t 
v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t 
v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, 
size_t vl) { - return vsoxseg5ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf4_m( @@ -310,7 +310,7 @@ 
void test_vsoxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, 
vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i32m1_m(mask, base, bindex, v0, 
v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, 
vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c index 66fcdede626d..ed370aee8e5e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf4(int8_t 
*base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg5ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, 
vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, 
size_t vl) { - return vsoxseg5ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u8m1(base, 
bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_u32mf2(base, bindex, v0, 
v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - 
return vsoxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_f64m1_m(vbool64_t mask, 
double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg5ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t 
binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2_m( @@ 
-364,7 +364,7 @@ void test_vsoxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return 
__riscv_vsoxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, 
size_t vl) { - return vsoxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c index 0751492e60ed..fa513529b769 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t 
v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, 
size_t vl) { - return vsoxseg5ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); + 
return __riscv_vsoxseg5ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return 
__riscv_vsoxseg5ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u8mf4(base, bindex, v0, v1, 
v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg5ei64_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_f32mf2_m(mask, 
base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, 
vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg5ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1_m( @@ -382,7 +382,7 @@ void 
test_vsoxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, 
v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return 
vsoxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c index 797b524a788c..85caabc462b2 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return 
__riscv_vsoxseg5ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg5ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1( @@ -121,7 +121,7 @@ void 
test_vsoxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t 
v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg5ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, 
vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg5ei8_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i8m1_m(mask, base, 
bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_i32mf2_m(mask, 
base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg5ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg5ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, 
vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16mf2_m(vbool32_t 
mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg5ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg5ei8_v_u32m1_m(vbool32_t mask, 
uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg5ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsoxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c index e0ffcfba301e..1d390469bc9d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16m1(_Float16 
*base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg6ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, 
vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t 
bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg6ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1( @@ -139,7 +139,7 
@@ void test_vsoxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg6ei16_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return 
__riscv_vsoxseg6ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return 
vsoxseg6ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return 
__riscv_vsoxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t 
v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1_m( @@ -373,7 +373,7 @@ void 
test_vsoxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, 
v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16mf2_m(vbool32_t mask, 
uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg6ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c index a84087112d84..dd0505a0dec9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg6ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1( @@ -31,7 +31,7 @@ void 
test_vsoxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); 
} // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, 
vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return 
__riscv_vsoxseg6ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_u8mf4(base, 
bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, 
vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg6ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, 
vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg6ei32_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, 
vint8mf8_t v5, size_t vl) { - return vsoxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg6ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // 
void test_vsoxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, 
v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t 
v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, 
vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + 
return __riscv_vsoxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c index c539bfe74f02..4d59dcfe6e1e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, 
v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg6ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, 
vl); + return __riscv_vsoxseg6ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg6ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, 
v5, vl); + return __riscv_vsoxseg6ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return 
vsoxseg6ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t 
v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, 
vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, 
v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg6ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, 
vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, 
// CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return 
__riscv_vsoxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg6ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t 
v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2_m( @@ -436,7 +436,7 @@ void 
test_vsoxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return 
vsoxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c index 34856d7b4b15..08c8b53a46ba 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return 
__riscv_vsoxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg6ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return 
vsoxseg6ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return 
vsoxseg6ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, 
vint32mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, 
vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg6ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, 
vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } 
// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, 
vint8mf8_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - 
return vsoxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, 
v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg6ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsoxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c index cb0ea3591171..cf599d715f30 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg7ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, 
size_t vl) { - return vsoxseg7ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t 
bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2( @@ -130,7 +130,7 @@ void 
test_vsoxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return 
__riscv_vsoxseg7ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, 
vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t 
bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_f16mf2_m(mask, base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg7ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1_m( @@ -292,7 +292,7 @@ void 
test_vsoxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, 
vint8mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1_m( @@ -355,7 +355,7 @@ void 
test_vsoxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, 
vint64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1_m( @@ -418,7 +418,7 @@ 
void test_vsoxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, 
vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c index 6286ed64d73f..55059c24e304 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg7ei32_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return 
vsoxseg7ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg7ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg7ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, 
vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg7ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg7ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg7ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg7ei32_v_i32mf2(int32_t 
*base, vuint32mf2_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg7ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg7ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg7ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u8mf4(base, bindex, v0, v1, 
v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg7ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg7ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg7ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg7ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, 
vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg7ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg7ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg7ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg7ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret 
void // void test_vsoxseg7ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg7ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f16m1_m(mask, base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg7ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg7ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg7ei32_v_f64m1_m(vbool64_t 
mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg7ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i8m1_m(mask, base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, 
vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return 
vsoxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg7ei32_v_u8m1_m(vbool8_t mask, 
uint8_t *base, vuint32m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, 
vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c index 5179dddab0e5..82e996e22325 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, 
vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg7ei64_v_f32mf2(float *base, 
vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i8mf4(base, bindex, v0, v1, v2, 
v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return 
vsoxseg7ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t 
bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1( @@ -184,7 +184,7 @@ void 
test_vsoxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, 
vl); + return __riscv_vsoxseg7ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg7ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, 
vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return 
__riscv_vsoxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return 
__riscv_vsoxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + 
return __riscv_vsoxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // 
void test_vsoxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u32m1_m(mask, base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c index 44d4238ff6fd..a05e1659b643 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_f16mf2(base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg7ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, 
vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, 
vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei8_v_u8mf8(base, bindex, 
v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, 
vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg7ei8_v_u32mf2(uint32_t *base, 
vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_f16mf2_m(mask, base, bindex, 
v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg7ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, 
vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf2_m(mask, base, bindex, 
v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret 
void // void test_vsoxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg7ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return 
__riscv_vsoxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, 
v6, vl); + return __riscv_vsoxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg7ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsoxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c index c32cba59bc59..5cf439f86e36 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg8ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2( @@ -40,7 
+40,7 @@ void test_vsoxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t 
v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, 
vl); + return __riscv_vsoxseg8ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf4(uint8_t *base, 
vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg8ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg8ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, 
v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, 
vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + 
return __riscv_vsoxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1_m( @@ -292,7 +292,7 @@ void 
test_vsoxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t 
v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return 
__riscv_vsoxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t 
*base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, 
vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return 
__riscv_vsoxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1_m( @@ -472,6 +472,6 @@ void 
test_vsoxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c index 704b9ee2f881..0af1b55e720e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } 
// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, 
vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1( @@ -94,7 +94,7 
@@ void test_vsoxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return 
vsoxseg8ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return 
__riscv_vsoxseg8ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg8ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg8ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg8ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg8ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg8ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg8ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, 
v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg8ei32_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg8ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg8ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg8ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg8ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg8ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg8ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg8ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg8ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return 
vsoxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg8ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg8ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg8ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg8ei32_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg8ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg8ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg8ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg8ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t 
*base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_u16mf4_m(mask, 
base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg8ei32_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg8ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c index 80a2e49c0c2e..b9698b6abdf9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return 
__riscv_vsoxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i8mf4(base, 
bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, 
vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1( @@ -139,7 +139,7 @@ void test_vsoxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1( @@ -148,7 
+148,7 @@ void test_vsoxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t 
vl) { - return vsoxseg8ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg8ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg8ei64_v_u16mf2(uint16_t *base, 
vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return 
vsoxseg8ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsoxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, 
vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg8ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - 
return vsoxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg8ei64_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t 
*base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u8m1_m(mask, base, bindex, 
v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg8ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: 
@test_vsoxseg8ei64_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c index 
8199d2a21c7b..8ffd9d69617b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsoxseg8ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsoxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsoxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // CHECK-RV64-NEXT: ret void // 
void test_vsoxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1( @@ -49,7 +49,7 @@ void test_vsoxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1( @@ -58,7 +58,7 @@ void test_vsoxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsoxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return 
__riscv_vsoxseg8ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsoxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsoxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1( @@ -94,7 +94,7 @@ void test_vsoxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsoxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t 
v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsoxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1( @@ -121,7 +121,7 @@ void test_vsoxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsoxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1( @@ -139,7 
+139,7 @@ void test_vsoxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1( @@ -148,7 +148,7 @@ void test_vsoxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsoxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsoxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return 
vsoxseg8ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsoxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1( @@ -184,7 +184,7 @@ void test_vsoxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsoxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsoxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // 
CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1( @@ -211,7 +211,7 @@ void test_vsoxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsoxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1( @@ -229,7 +229,7 @@ void test_vsoxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, 
v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1( @@ -238,7 +238,7 @@ void test_vsoxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsoxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsoxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1_m( @@ -265,7 +265,7 @@ void 
test_vsoxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsoxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsoxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsoxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t 
bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsoxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsoxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsoxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + 
return __riscv_vsoxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsoxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsoxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsoxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsoxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, 
vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsoxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsoxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsoxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t 
v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsoxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsoxseg8ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsoxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // 
CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsoxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsoxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsoxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsoxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void 
test_vsoxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsoxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsoxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsoxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsoxseg8ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t 
v7, size_t vl) { - return vsoxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsoxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsra.c index d46852ff36da..ecc99aba3754 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsra.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsra_vv_i8mf8(op1, shift, vl); + return __riscv_vsra_vv_i8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf8(op1, shift, vl); + return __riscv_vsra_vx_i8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsra_vv_i8mf4(op1, shift, vl); + return __riscv_vsra_vv_i8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf4(op1, shift, vl); + return __riscv_vsra_vx_i8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsra_vv_i8mf2(op1, shift, vl); + return __riscv_vsra_vv_i8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf2(op1, shift, vl); + return __riscv_vsra_vx_i8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsra_vv_i8m1(op1, shift, vl); + return __riscv_vsra_vv_i8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m1(op1, shift, vl); + return __riscv_vsra_vx_i8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsra_vv_i8m2(op1, shift, vl); + return __riscv_vsra_vv_i8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m2(op1, shift, vl); + return __riscv_vsra_vx_i8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsra_vv_i8m4(op1, shift, vl); + return __riscv_vsra_vv_i8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m4(op1, shift, vl); + return __riscv_vsra_vx_i8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsra_vv_i8m8(op1, shift, vl); + return __riscv_vsra_vv_i8m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m8(op1, shift, vl); + return __riscv_vsra_vx_i8m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsra_vv_i16mf4(op1, shift, vl); + return __riscv_vsra_vv_i16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf4(op1, shift, vl); + return __riscv_vsra_vx_i16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsra_vv_i16mf2(op1, shift, vl); + return __riscv_vsra_vv_i16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf2(op1, shift, vl); + return __riscv_vsra_vx_i16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsra_vv_i16m1(op1, shift, vl); + return __riscv_vsra_vv_i16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m1(op1, shift, vl); + return __riscv_vsra_vx_i16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsra_vv_i16m2(op1, shift, vl); + return __riscv_vsra_vv_i16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m2(op1, shift, vl); + return __riscv_vsra_vx_i16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t 
test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsra_vv_i16m4(op1, shift, vl); + return __riscv_vsra_vv_i16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m4(op1, shift, vl); + return __riscv_vsra_vx_i16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsra_vv_i16m8(op1, shift, vl); + return __riscv_vsra_vv_i16m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m8(op1, shift, vl); + return __riscv_vsra_vx_i16m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsra_vv_i32mf2(op1, shift, vl); + return __riscv_vsra_vv_i32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i32mf2(op1, shift, vl); + return __riscv_vsra_vx_i32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsra_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsra_vv_i32m1(op1, shift, vl); + return __riscv_vsra_vv_i32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m1(op1, shift, vl); + return __riscv_vsra_vx_i32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsra_vv_i32m2(op1, shift, vl); + return __riscv_vsra_vv_i32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m2(op1, shift, vl); + return __riscv_vsra_vx_i32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsra_vv_i32m4(op1, shift, vl); + return __riscv_vsra_vv_i32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m4(op1, shift, vl); + return 
__riscv_vsra_vx_i32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsra_vv_i32m8(op1, shift, vl); + return __riscv_vsra_vv_i32m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m8(op1, shift, vl); + return __riscv_vsra_vx_i32m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsra_vv_i64m1(op1, shift, vl); + return __riscv_vsra_vv_i64m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m1(op1, shift, vl); + return __riscv_vsra_vx_i64m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsra_vv_i64m2(op1, shift, vl); + return __riscv_vsra_vv_i64m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { - return 
vsra_vx_i64m2(op1, shift, vl); + return __riscv_vsra_vx_i64m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsra_vv_i64m4(op1, shift, vl); + return __riscv_vsra_vv_i64m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m4(op1, shift, vl); + return __riscv_vsra_vx_i64m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsra_vv_i64m8(op1, shift, vl); + return __riscv_vsra_vv_i64m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m8(op1, shift, vl); + return __riscv_vsra_vx_i64m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m( @@ -408,7 +408,7 @@ vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsra_vv_i8mf8_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf8_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m( @@ -426,7 +426,7 @@ vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsra_vv_i8mf4_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m( @@ -435,7 +435,7 @@ vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf4_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m( @@ -444,7 +444,7 @@ vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsra_vv_i8mf2_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m( @@ -453,7 +453,7 @@ vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf2_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m( @@ -462,7 +462,7 @@ vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return 
vsra_vv_i8m1_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m1_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m( @@ -480,7 +480,7 @@ vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsra_vv_i8m2_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m( @@ -489,7 +489,7 @@ vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m2_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m( @@ -498,7 +498,7 @@ vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsra_vv_i8m4_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m( @@ -507,7 +507,7 @@ vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m4_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsra_vv_i8m8_m( @@ -516,7 +516,7 @@ vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsra_vv_i8m8_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m( @@ -525,7 +525,7 @@ vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m8_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m( @@ -534,7 +534,7 @@ vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsra_vv_i16mf4_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m( @@ -543,7 +543,7 @@ vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf4_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m( @@ -552,7 +552,7 @@ vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsra_vv_i16mf2_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m( @@ -561,7 +561,7 @@ vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t 
mask, vint16mf2_t op1, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf2_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m( @@ -570,7 +570,7 @@ vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsra_vv_i16m1_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m( @@ -579,7 +579,7 @@ vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m1_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m( @@ -588,7 +588,7 @@ vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsra_vv_i16m2_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m( @@ -597,7 +597,7 @@ vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m2_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m( @@ -606,7 +606,7 @@ vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsra_vv_i16m4_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m( @@ -615,7 +615,7 @@ vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m4_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m( @@ -624,7 +624,7 @@ vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsra_vv_i16m8_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m( @@ -633,7 +633,7 @@ vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m8_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m( @@ -642,7 +642,7 @@ vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsra_vv_i32mf2_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m( @@ -651,7 +651,7 @@ vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return 
vsra_vx_i32mf2_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m( @@ -660,7 +660,7 @@ vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsra_vv_i32m1_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m( @@ -669,7 +669,7 @@ vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m1_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m( @@ -678,7 +678,7 @@ vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsra_vv_i32m2_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m( @@ -687,7 +687,7 @@ vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m2_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m( @@ -696,7 +696,7 @@ vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsra_vv_i32m4_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i32m4_m(mask, op1, shift, 
vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m( @@ -705,7 +705,7 @@ vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m4_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m( @@ -714,7 +714,7 @@ vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsra_vv_i32m8_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m( @@ -723,7 +723,7 @@ vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m8_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m( @@ -732,7 +732,7 @@ vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsra_vv_i64m1_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m( @@ -741,7 +741,7 @@ vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m1_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m( @@ -750,7 +750,7 @@ vint64m1_t 
test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsra_vv_i64m2_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m( @@ -759,7 +759,7 @@ vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m2_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m( @@ -768,7 +768,7 @@ vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsra_vv_i64m4_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m( @@ -777,7 +777,7 @@ vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m4_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m( @@ -786,7 +786,7 @@ vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsra_vv_i64m8_m(mask, op1, shift, vl); + return __riscv_vsra_vv_i64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m( @@ -795,6 +795,6 @@ vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m8_m(mask, op1, shift, vl); + return __riscv_vsra_vx_i64m8_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsrl.c index b81e16dd7b84..b5f62a0bc168 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsrl.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsrl_vv_u8mf8(op1, shift, vl); + return __riscv_vsrl_vv_u8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf8(op1, shift, vl); + return __riscv_vsrl_vx_u8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsrl_vv_u8mf4(op1, shift, vl); + return __riscv_vsrl_vv_u8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4( @@ -39,7 +39,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf4(op1, shift, vl); + return __riscv_vsrl_vx_u8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2( @@ -48,7 +48,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t 
vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsrl_vv_u8mf2(op1, shift, vl); + return __riscv_vsrl_vv_u8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf2(op1, shift, vl); + return __riscv_vsrl_vx_u8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsrl_vv_u8m1(op1, shift, vl); + return __riscv_vsrl_vv_u8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m1(op1, shift, vl); + return __riscv_vsrl_vx_u8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2( @@ -84,7 +84,7 @@ vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsrl_vv_u8m2(op1, shift, vl); + return __riscv_vsrl_vv_u8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m2(op1, shift, vl); + return __riscv_vsrl_vx_u8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsrl_vv_u8m4(op1, shift, vl); + return __riscv_vsrl_vv_u8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4( @@ -111,7 +111,7 @@ vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m4(op1, shift, vl); + return __riscv_vsrl_vx_u8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8( @@ -120,7 +120,7 @@ vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsrl_vv_u8m8(op1, shift, vl); + return __riscv_vsrl_vv_u8m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8( @@ -129,7 +129,7 @@ vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m8(op1, shift, vl); + return __riscv_vsrl_vx_u8m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4( @@ -138,7 +138,7 @@ vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsrl_vv_u16mf4(op1, shift, vl); + return __riscv_vsrl_vv_u16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4( @@ -147,7 +147,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf4(op1, shift, vl); + return __riscv_vsrl_vx_u16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2( @@ -156,7 +156,7 @@ vuint16mf4_t 
test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsrl_vv_u16mf2(op1, shift, vl); + return __riscv_vsrl_vv_u16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2( @@ -165,7 +165,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf2(op1, shift, vl); + return __riscv_vsrl_vx_u16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1( @@ -174,7 +174,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsrl_vv_u16m1(op1, shift, vl); + return __riscv_vsrl_vv_u16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1( @@ -183,7 +183,7 @@ vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m1(op1, shift, vl); + return __riscv_vsrl_vx_u16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2( @@ -192,7 +192,7 @@ vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsrl_vv_u16m2(op1, shift, vl); + return __riscv_vsrl_vv_u16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2( @@ -201,7 +201,7 @@ vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m2(op1, shift, vl); + return __riscv_vsrl_vx_u16m2(op1, shift, vl); 
} // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4( @@ -210,7 +210,7 @@ vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsrl_vv_u16m4(op1, shift, vl); + return __riscv_vsrl_vv_u16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4( @@ -219,7 +219,7 @@ vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m4(op1, shift, vl); + return __riscv_vsrl_vx_u16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8( @@ -228,7 +228,7 @@ vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsrl_vv_u16m8(op1, shift, vl); + return __riscv_vsrl_vv_u16m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8( @@ -237,7 +237,7 @@ vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m8(op1, shift, vl); + return __riscv_vsrl_vx_u16m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2( @@ -246,7 +246,7 @@ vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsrl_vv_u32mf2(op1, shift, vl); + return __riscv_vsrl_vv_u32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { - return 
vsrl_vx_u32mf2(op1, shift, vl); + return __riscv_vsrl_vx_u32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsrl_vv_u32m1(op1, shift, vl); + return __riscv_vsrl_vv_u32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m1(op1, shift, vl); + return __riscv_vsrl_vx_u32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2( @@ -282,7 +282,7 @@ vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsrl_vv_u32m2(op1, shift, vl); + return __riscv_vsrl_vv_u32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2( @@ -291,7 +291,7 @@ vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m2(op1, shift, vl); + return __riscv_vsrl_vx_u32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4( @@ -300,7 +300,7 @@ vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsrl_vv_u32m4(op1, shift, vl); + return __riscv_vsrl_vv_u32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4( @@ -309,7 +309,7 @@ vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m4(op1, shift, vl); + return __riscv_vsrl_vx_u32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8( @@ -318,7 +318,7 @@ vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsrl_vv_u32m8(op1, shift, vl); + return __riscv_vsrl_vv_u32m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8( @@ -327,7 +327,7 @@ vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m8(op1, shift, vl); + return __riscv_vsrl_vx_u32m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1( @@ -336,7 +336,7 @@ vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsrl_vv_u64m1(op1, shift, vl); + return __riscv_vsrl_vv_u64m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1( @@ -345,7 +345,7 @@ vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m1(op1, shift, vl); + return __riscv_vsrl_vx_u64m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2( @@ -354,7 +354,7 @@ vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsrl_vv_u64m2(op1, shift, vl); + return __riscv_vsrl_vv_u64m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2( @@ -363,7 +363,7 @@ vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m2(op1, shift, vl); + return __riscv_vsrl_vx_u64m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4( @@ -372,7 +372,7 @@ vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsrl_vv_u64m4(op1, shift, vl); + return __riscv_vsrl_vv_u64m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4( @@ -381,7 +381,7 @@ vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m4(op1, shift, vl); + return __riscv_vsrl_vx_u64m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8( @@ -390,7 +390,7 @@ vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsrl_vv_u64m8(op1, shift, vl); + return __riscv_vsrl_vv_u64m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m8(op1, shift, vl); + return __riscv_vsrl_vx_u64m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsrl_vv_u8mf8_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsrl_vx_u8mf8_m( @@ -417,7 +417,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf8_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m( @@ -426,7 +426,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsrl_vv_u8mf4_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m( @@ -435,7 +435,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf4_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m( @@ -444,7 +444,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsrl_vv_u8mf2_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m( @@ -453,7 +453,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf2_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m( @@ -462,7 +462,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, 
vuint8mf2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsrl_vv_u8m1_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m( @@ -471,7 +471,7 @@ vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m1_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m( @@ -480,7 +480,7 @@ vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsrl_vv_u8m2_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m( @@ -489,7 +489,7 @@ vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m2_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m( @@ -498,7 +498,7 @@ vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsrl_vv_u8m4_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m( @@ -507,7 +507,7 @@ vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t 
op1, size_t shift, size_t vl) { - return vsrl_vx_u8m4_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m( @@ -516,7 +516,7 @@ vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsrl_vv_u8m8_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m( @@ -525,7 +525,7 @@ vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m8_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m( @@ -534,7 +534,7 @@ vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsrl_vv_u16mf4_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m( @@ -543,7 +543,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf4_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m( @@ -552,7 +552,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsrl_vv_u16mf2_m(mask, op1, shift, vl); + 
return __riscv_vsrl_vv_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m( @@ -561,7 +561,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf2_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m( @@ -570,7 +570,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsrl_vv_u16m1_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m( @@ -579,7 +579,7 @@ vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m1_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m( @@ -588,7 +588,7 @@ vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsrl_vv_u16m2_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m( @@ -597,7 +597,7 @@ vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m2_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsrl_vv_u16m4_m( @@ -606,7 +606,7 @@ vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsrl_vv_u16m4_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m( @@ -615,7 +615,7 @@ vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m4_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m( @@ -624,7 +624,7 @@ vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsrl_vv_u16m8_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m( @@ -633,7 +633,7 @@ vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m8_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m( @@ -642,7 +642,7 @@ vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsrl_vv_u32mf2_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m( @@ -651,7 +651,7 @@ vuint32mf2_t 
test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32mf2_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m( @@ -660,7 +660,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsrl_vv_u32m1_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m( @@ -669,7 +669,7 @@ vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m1_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m( @@ -678,7 +678,7 @@ vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsrl_vv_u32m2_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m( @@ -687,7 +687,7 @@ vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m2_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m( @@ -696,7 +696,7 @@ vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsrl_vv_u32m4_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m( @@ -705,7 +705,7 @@ vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m4_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m( @@ -714,7 +714,7 @@ vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsrl_vv_u32m8_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m( @@ -723,7 +723,7 @@ vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m8_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m( @@ -732,7 +732,7 @@ vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsrl_vv_u64m1_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m( @@ -741,7 +741,7 @@ vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, 
vuint64m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m1_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m( @@ -750,7 +750,7 @@ vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsrl_vv_u64m2_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m( @@ -759,7 +759,7 @@ vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m2_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m( @@ -768,7 +768,7 @@ vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsrl_vv_u64m4_m(mask, op1, shift, vl); + return __riscv_vsrl_vv_u64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m( @@ -777,7 +777,7 @@ vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m4_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m( @@ -786,7 +786,7 @@ vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsrl_vv_u64m8_m(mask, op1, shift, 
vl); + return __riscv_vsrl_vv_u64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m8_m(mask, op1, shift, vl); + return __riscv_vsrl_vx_u64m8_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse16.c index 9c02316c8622..9388c91253be 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsse16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) { - return vsse16_v_f16mf4(base, bstride, value, vl); + return __riscv_vsse16_v_f16mf4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsse16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value // CHECK-RV64-NEXT: ret void // void test_vsse16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) { - return vsse16_v_f16mf2(base, bstride, value, vl); + return __riscv_vsse16_v_f16mf2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsse16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value // CHECK-RV64-NEXT: ret void // void test_vsse16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) { - return vsse16_v_f16m1(base, bstride, value, vl); + return __riscv_vsse16_v_f16m1(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_f16m2( @@ -40,7 +40,7 @@ void test_vsse16_v_f16m1(_Float16 *base, ptrdiff_t 
bstride, vfloat16m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsse16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) { - return vsse16_v_f16m2(base, bstride, value, vl); + return __riscv_vsse16_v_f16m2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_f16m4( @@ -49,7 +49,7 @@ void test_vsse16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsse16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) { - return vsse16_v_f16m4(base, bstride, value, vl); + return __riscv_vsse16_v_f16m4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_f16m8( @@ -58,7 +58,7 @@ void test_vsse16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsse16_v_f16m8(_Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) { - return vsse16_v_f16m8(base, bstride, value, vl); + return __riscv_vsse16_v_f16m8(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_i16mf4( @@ -67,7 +67,7 @@ void test_vsse16_v_f16m8(_Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) { - return vsse16_v_i16mf4(base, bstride, value, vl); + return __riscv_vsse16_v_i16mf4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_i16mf2( @@ -76,7 +76,7 @@ void test_vsse16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) { - return vsse16_v_i16mf2(base, bstride, value, vl); + return __riscv_vsse16_v_i16mf2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_i16m1( @@ -85,7 +85,7 @@ void test_vsse16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m1(int16_t *base, 
ptrdiff_t bstride, vint16m1_t value, size_t vl) { - return vsse16_v_i16m1(base, bstride, value, vl); + return __riscv_vsse16_v_i16m1(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_i16m2( @@ -94,7 +94,7 @@ void test_vsse16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl) { - return vsse16_v_i16m2(base, bstride, value, vl); + return __riscv_vsse16_v_i16m2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_i16m4( @@ -103,7 +103,7 @@ void test_vsse16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) { - return vsse16_v_i16m4(base, bstride, value, vl); + return __riscv_vsse16_v_i16m4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_i16m8( @@ -112,7 +112,7 @@ void test_vsse16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m8(int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) { - return vsse16_v_i16m8(base, bstride, value, vl); + return __riscv_vsse16_v_i16m8(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_u16mf4( @@ -121,7 +121,7 @@ void test_vsse16_v_i16m8(int16_t *base, ptrdiff_t bstride, vint16m8_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) { - return vsse16_v_u16mf4(base, bstride, value, vl); + return __riscv_vsse16_v_u16mf4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_u16mf2( @@ -130,7 +130,7 @@ void test_vsse16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) { - return vsse16_v_u16mf2(base, bstride, value, 
vl); + return __riscv_vsse16_v_u16mf2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_u16m1( @@ -139,7 +139,7 @@ void test_vsse16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) { - return vsse16_v_u16m1(base, bstride, value, vl); + return __riscv_vsse16_v_u16m1(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_u16m2( @@ -148,7 +148,7 @@ void test_vsse16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) { - return vsse16_v_u16m2(base, bstride, value, vl); + return __riscv_vsse16_v_u16m2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_u16m4( @@ -157,7 +157,7 @@ void test_vsse16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) { - return vsse16_v_u16m4(base, bstride, value, vl); + return __riscv_vsse16_v_u16m4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_u16m8( @@ -166,7 +166,7 @@ void test_vsse16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m8(uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) { - return vsse16_v_u16m8(base, bstride, value, vl); + return __riscv_vsse16_v_u16m8(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_f16mf4_m( @@ -175,7 +175,7 @@ void test_vsse16_v_u16m8(uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) { - return vsse16_v_f16mf4_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_f16mf4_m(mask, base, bstride, 
value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_f16mf2_m( @@ -184,7 +184,7 @@ void test_vsse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vsse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) { - return vsse16_v_f16mf2_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_f16mf2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_f16m1_m( @@ -193,7 +193,7 @@ void test_vsse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vsse16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) { - return vsse16_v_f16m1_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_f16m1_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_f16m2_m( @@ -202,7 +202,7 @@ void test_vsse16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vf // CHECK-RV64-NEXT: ret void // void test_vsse16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) { - return vsse16_v_f16m2_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_f16m2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_f16m4_m( @@ -211,7 +211,7 @@ void test_vsse16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfl // CHECK-RV64-NEXT: ret void // void test_vsse16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) { - return vsse16_v_f16m4_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_f16m4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_f16m8_m( @@ -220,7 +220,7 @@ void test_vsse16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfl // CHECK-RV64-NEXT: ret void // void test_vsse16_v_f16m8_m(vbool2_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) { - return 
vsse16_v_f16m8_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_f16m8_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_i16mf4_m( @@ -229,7 +229,7 @@ void test_vsse16_v_f16m8_m(vbool2_t mask, _Float16 *base, ptrdiff_t bstride, vfl // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) { - return vsse16_v_i16mf4_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_i16mf4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_i16mf2_m( @@ -238,7 +238,7 @@ void test_vsse16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vi // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) { - return vsse16_v_i16mf2_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_i16mf2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_i16m1_m( @@ -247,7 +247,7 @@ void test_vsse16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vi // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl) { - return vsse16_v_i16m1_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_i16m1_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_i16m2_m( @@ -256,7 +256,7 @@ void test_vsse16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl) { - return vsse16_v_i16m2_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_i16m2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_i16m4_m( @@ -265,7 +265,7 @@ void test_vsse16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m4_m(vbool4_t 
mask, int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) { - return vsse16_v_i16m4_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_i16m4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_i16m8_m( @@ -274,7 +274,7 @@ void test_vsse16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m8_m(vbool2_t mask, int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) { - return vsse16_v_i16m8_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_i16m8_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_u16mf4_m( @@ -283,7 +283,7 @@ void test_vsse16_v_i16m8_m(vbool2_t mask, int16_t *base, ptrdiff_t bstride, vint // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) { - return vsse16_v_u16mf4_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_u16mf4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_u16mf2_m( @@ -292,7 +292,7 @@ void test_vsse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) { - return vsse16_v_u16mf2_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_u16mf2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_u16m1_m( @@ -301,7 +301,7 @@ void test_vsse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) { - return vsse16_v_u16m1_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_u16m1_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_u16m2_m( @@ -310,7 +310,7 @@ void test_vsse16_v_u16m1_m(vbool16_t mask, uint16_t *base, 
ptrdiff_t bstride, vu // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) { - return vsse16_v_u16m2_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_u16m2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_u16m4_m( @@ -319,7 +319,7 @@ void test_vsse16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vui // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) { - return vsse16_v_u16m4_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_u16m4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse16_v_u16m8_m( @@ -328,6 +328,6 @@ void test_vsse16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vui // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m8_m(vbool2_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) { - return vsse16_v_u16m8_m(mask, base, bstride, value, vl); + return __riscv_vsse16_v_u16m8_m(mask, base, bstride, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse32.c index 6d86b7598020..9f9e9245ff89 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) { - return vsse32_v_f32mf2(base, bstride, value, vl); + return __riscv_vsse32_v_f32mf2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_f32m1( @@ -22,7 +22,7 @@ void test_vsse32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t value, s // CHECK-RV64-NEXT: ret void // void 
test_vsse32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) { - return vsse32_v_f32m1(base, bstride, value, vl); + return __riscv_vsse32_v_f32m1(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_f32m2( @@ -31,7 +31,7 @@ void test_vsse32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) { - return vsse32_v_f32m2(base, bstride, value, vl); + return __riscv_vsse32_v_f32m2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_f32m4( @@ -40,7 +40,7 @@ void test_vsse32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) { - return vsse32_v_f32m4(base, bstride, value, vl); + return __riscv_vsse32_v_f32m4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_f32m8( @@ -49,7 +49,7 @@ void test_vsse32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32m8(float *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) { - return vsse32_v_f32m8(base, bstride, value, vl); + return __riscv_vsse32_v_f32m8(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_i32mf2( @@ -58,7 +58,7 @@ void test_vsse32_v_f32m8(float *base, ptrdiff_t bstride, vfloat32m8_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) { - return vsse32_v_i32mf2(base, bstride, value, vl); + return __riscv_vsse32_v_i32mf2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_i32m1( @@ -67,7 +67,7 @@ void test_vsse32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) { - return vsse32_v_i32m1(base, 
bstride, value, vl); + return __riscv_vsse32_v_i32m1(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_i32m2( @@ -76,7 +76,7 @@ void test_vsse32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl) { - return vsse32_v_i32m2(base, bstride, value, vl); + return __riscv_vsse32_v_i32m2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_i32m4( @@ -85,7 +85,7 @@ void test_vsse32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) { - return vsse32_v_i32m4(base, bstride, value, vl); + return __riscv_vsse32_v_i32m4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_i32m8( @@ -94,7 +94,7 @@ void test_vsse32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m8(int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) { - return vsse32_v_i32m8(base, bstride, value, vl); + return __riscv_vsse32_v_i32m8(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_u32mf2( @@ -103,7 +103,7 @@ void test_vsse32_v_i32m8(int32_t *base, ptrdiff_t bstride, vint32m8_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) { - return vsse32_v_u32mf2(base, bstride, value, vl); + return __riscv_vsse32_v_u32mf2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_u32m1( @@ -112,7 +112,7 @@ void test_vsse32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) { - return vsse32_v_u32m1(base, bstride, value, vl); + return __riscv_vsse32_v_u32m1(base, bstride, value, vl); } // CHECK-RV64-LABEL: 
@test_vsse32_v_u32m2( @@ -121,7 +121,7 @@ void test_vsse32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) { - return vsse32_v_u32m2(base, bstride, value, vl); + return __riscv_vsse32_v_u32m2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_u32m4( @@ -130,7 +130,7 @@ void test_vsse32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) { - return vsse32_v_u32m4(base, bstride, value, vl); + return __riscv_vsse32_v_u32m4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_u32m8( @@ -139,7 +139,7 @@ void test_vsse32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m8(uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) { - return vsse32_v_u32m8(base, bstride, value, vl); + return __riscv_vsse32_v_u32m8(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_f32mf2_m( @@ -148,7 +148,7 @@ void test_vsse32_v_u32m8(uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) { - return vsse32_v_f32mf2_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_f32mf2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_f32m1_m( @@ -157,7 +157,7 @@ void test_vsse32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vflo // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) { - return vsse32_v_f32m1_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_f32m1_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_f32m2_m( @@ 
-166,7 +166,7 @@ void test_vsse32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloa // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) { - return vsse32_v_f32m2_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_f32m2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_f32m4_m( @@ -175,7 +175,7 @@ void test_vsse32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloa // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) { - return vsse32_v_f32m4_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_f32m4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_f32m8_m( @@ -184,7 +184,7 @@ void test_vsse32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32m8_m(vbool4_t mask, float *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) { - return vsse32_v_f32m8_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_f32m8_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_i32mf2_m( @@ -193,7 +193,7 @@ void test_vsse32_v_f32m8_m(vbool4_t mask, float *base, ptrdiff_t bstride, vfloat // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) { - return vsse32_v_i32mf2_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_i32mf2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_i32m1_m( @@ -202,7 +202,7 @@ void test_vsse32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vi // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) { - return vsse32_v_i32m1_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_i32m1_m(mask, 
base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_i32m2_m( @@ -211,7 +211,7 @@ void test_vsse32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl) { - return vsse32_v_i32m2_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_i32m2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_i32m4_m( @@ -220,7 +220,7 @@ void test_vsse32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) { - return vsse32_v_i32m4_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_i32m4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_i32m8_m( @@ -229,7 +229,7 @@ void test_vsse32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m8_m(vbool4_t mask, int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) { - return vsse32_v_i32m8_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_i32m8_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_u32mf2_m( @@ -238,7 +238,7 @@ void test_vsse32_v_i32m8_m(vbool4_t mask, int32_t *base, ptrdiff_t bstride, vint // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) { - return vsse32_v_u32mf2_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_u32mf2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_u32m1_m( @@ -247,7 +247,7 @@ void test_vsse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) { - return 
vsse32_v_u32m1_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_u32m1_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_u32m2_m( @@ -256,7 +256,7 @@ void test_vsse32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vu // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) { - return vsse32_v_u32m2_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_u32m2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_u32m4_m( @@ -265,7 +265,7 @@ void test_vsse32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vu // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) { - return vsse32_v_u32m4_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_u32m4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse32_v_u32m8_m( @@ -274,6 +274,6 @@ void test_vsse32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vui // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m8_m(vbool4_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) { - return vsse32_v_u32m8_m(mask, base, bstride, value, vl); + return __riscv_vsse32_v_u32m8_m(mask, base, bstride, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse64.c index 0ebfca2e1009..b20de1428d95 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) { - return vsse64_v_f64m1(base, bstride, value, vl); + return 
__riscv_vsse64_v_f64m1(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_f64m2( @@ -22,7 +22,7 @@ void test_vsse64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t value, si // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) { - return vsse64_v_f64m2(base, bstride, value, vl); + return __riscv_vsse64_v_f64m2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_f64m4( @@ -31,7 +31,7 @@ void test_vsse64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t value, si // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) { - return vsse64_v_f64m4(base, bstride, value, vl); + return __riscv_vsse64_v_f64m4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_f64m8( @@ -40,7 +40,7 @@ void test_vsse64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t value, si // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m8(double *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) { - return vsse64_v_f64m8(base, bstride, value, vl); + return __riscv_vsse64_v_f64m8(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_i64m1( @@ -49,7 +49,7 @@ void test_vsse64_v_f64m8(double *base, ptrdiff_t bstride, vfloat64m8_t value, si // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) { - return vsse64_v_i64m1(base, bstride, value, vl); + return __riscv_vsse64_v_i64m1(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_i64m2( @@ -58,7 +58,7 @@ void test_vsse64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl) { - return vsse64_v_i64m2(base, bstride, value, vl); + return __riscv_vsse64_v_i64m2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_i64m4( @@ -67,7 +67,7 @@ 
void test_vsse64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) { - return vsse64_v_i64m4(base, bstride, value, vl); + return __riscv_vsse64_v_i64m4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_i64m8( @@ -76,7 +76,7 @@ void test_vsse64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m8(int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) { - return vsse64_v_i64m8(base, bstride, value, vl); + return __riscv_vsse64_v_i64m8(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_u64m1( @@ -85,7 +85,7 @@ void test_vsse64_v_i64m8(int64_t *base, ptrdiff_t bstride, vint64m8_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) { - return vsse64_v_u64m1(base, bstride, value, vl); + return __riscv_vsse64_v_u64m1(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_u64m2( @@ -94,7 +94,7 @@ void test_vsse64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) { - return vsse64_v_u64m2(base, bstride, value, vl); + return __riscv_vsse64_v_u64m2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_u64m4( @@ -103,7 +103,7 @@ void test_vsse64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) { - return vsse64_v_u64m4(base, bstride, value, vl); + return __riscv_vsse64_v_u64m4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_u64m8( @@ -112,7 +112,7 @@ void test_vsse64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, s // CHECK-RV64-NEXT: ret void 
// void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) { - return vsse64_v_u64m8(base, bstride, value, vl); + return __riscv_vsse64_v_u64m8(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_f64m1_m( @@ -121,7 +121,7 @@ void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, s // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) { - return vsse64_v_f64m1_m(mask, base, bstride, value, vl); + return __riscv_vsse64_v_f64m1_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_f64m2_m( @@ -130,7 +130,7 @@ void test_vsse64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vflo // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) { - return vsse64_v_f64m2_m(mask, base, bstride, value, vl); + return __riscv_vsse64_v_f64m2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_f64m4_m( @@ -139,7 +139,7 @@ void test_vsse64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vflo // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) { - return vsse64_v_f64m4_m(mask, base, bstride, value, vl); + return __riscv_vsse64_v_f64m4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_f64m8_m( @@ -148,7 +148,7 @@ void test_vsse64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride, vflo // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m8_m(vbool8_t mask, double *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) { - return vsse64_v_f64m8_m(mask, base, bstride, value, vl); + return __riscv_vsse64_v_f64m8_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_i64m1_m( @@ -157,7 +157,7 @@ void test_vsse64_v_f64m8_m(vbool8_t mask, double *base, ptrdiff_t 
bstride, vfloa // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) { - return vsse64_v_i64m1_m(mask, base, bstride, value, vl); + return __riscv_vsse64_v_i64m1_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_i64m2_m( @@ -166,7 +166,7 @@ void test_vsse64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl) { - return vsse64_v_i64m2_m(mask, base, bstride, value, vl); + return __riscv_vsse64_v_i64m2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_i64m4_m( @@ -175,7 +175,7 @@ void test_vsse64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) { - return vsse64_v_i64m4_m(mask, base, bstride, value, vl); + return __riscv_vsse64_v_i64m4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_i64m8_m( @@ -184,7 +184,7 @@ void test_vsse64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m8_m(vbool8_t mask, int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) { - return vsse64_v_i64m8_m(mask, base, bstride, value, vl); + return __riscv_vsse64_v_i64m8_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_u64m1_m( @@ -193,7 +193,7 @@ void test_vsse64_v_i64m8_m(vbool8_t mask, int64_t *base, ptrdiff_t bstride, vint // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) { - return vsse64_v_u64m1_m(mask, base, bstride, value, vl); + return __riscv_vsse64_v_u64m1_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_u64m2_m( @@ -202,7 
+202,7 @@ void test_vsse64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vu // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) { - return vsse64_v_u64m2_m(mask, base, bstride, value, vl); + return __riscv_vsse64_v_u64m2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_u64m4_m( @@ -211,7 +211,7 @@ void test_vsse64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vu // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) { - return vsse64_v_u64m4_m(mask, base, bstride, value, vl); + return __riscv_vsse64_v_u64m4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse64_v_u64m8_m( @@ -220,6 +220,6 @@ void test_vsse64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vu // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m8_m(vbool8_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) { - return vsse64_v_u64m8_m(mask, base, bstride, value, vl); + return __riscv_vsse64_v_u64m8_m(mask, base, bstride, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse8.c index 962c31356226..a013de16f567 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsse8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl) { - return vsse8_v_i8mf8(base, bstride, value, vl); + return __riscv_vsse8_v_i8mf8(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vsse8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_ 
// CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) { - return vsse8_v_i8mf4(base, bstride, value, vl); + return __riscv_vsse8_v_i8mf4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vsse8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) { - return vsse8_v_i8mf2(base, bstride, value, vl); + return __riscv_vsse8_v_i8mf2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_i8m1( @@ -39,7 +39,7 @@ void test_vsse8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) { - return vsse8_v_i8m1(base, bstride, value, vl); + return __riscv_vsse8_v_i8m1(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_i8m2( @@ -48,7 +48,7 @@ void test_vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) { - return vsse8_v_i8m2(base, bstride, value, vl); + return __riscv_vsse8_v_i8m2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_i8m4( @@ -57,7 +57,7 @@ void test_vsse8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) { - return vsse8_v_i8m4(base, bstride, value, vl); + return __riscv_vsse8_v_i8m4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_i8m8( @@ -66,7 +66,7 @@ void test_vsse8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m8(int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) { - return vsse8_v_i8m8(base, bstride, value, 
vl); + return __riscv_vsse8_v_i8m8(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_u8mf8( @@ -75,7 +75,7 @@ void test_vsse8_v_i8m8(int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) { - return vsse8_v_u8mf8(base, bstride, value, vl); + return __riscv_vsse8_v_u8mf8(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_u8mf4( @@ -84,7 +84,7 @@ void test_vsse8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) { - return vsse8_v_u8mf4(base, bstride, value, vl); + return __riscv_vsse8_v_u8mf4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_u8mf2( @@ -93,7 +93,7 @@ void test_vsse8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) { - return vsse8_v_u8mf2(base, bstride, value, vl); + return __riscv_vsse8_v_u8mf2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_u8m1( @@ -102,7 +102,7 @@ void test_vsse8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) { - return vsse8_v_u8m1(base, bstride, value, vl); + return __riscv_vsse8_v_u8m1(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_u8m2( @@ -111,7 +111,7 @@ void test_vsse8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) { - return vsse8_v_u8m2(base, bstride, value, vl); + return __riscv_vsse8_v_u8m2(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_u8m4( @@ -120,7 +120,7 @@ void 
test_vsse8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) { - return vsse8_v_u8m4(base, bstride, value, vl); + return __riscv_vsse8_v_u8m4(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_u8m8( @@ -129,7 +129,7 @@ void test_vsse8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m8(uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) { - return vsse8_v_u8m8(base, bstride, value, vl); + return __riscv_vsse8_v_u8m8(base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_i8mf8_m( @@ -138,7 +138,7 @@ void test_vsse8_v_u8m8(uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl) { - return vsse8_v_i8mf8_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_i8mf8_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_i8mf4_m( @@ -147,7 +147,7 @@ void test_vsse8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8 // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) { - return vsse8_v_i8mf4_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_i8mf4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_i8mf2_m( @@ -156,7 +156,7 @@ void test_vsse8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8 // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) { - return vsse8_v_i8mf2_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_i8mf2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_i8m1_m( @@ -165,7 +165,7 @@ void test_vsse8_v_i8mf2_m(vbool16_t 
mask, int8_t *base, ptrdiff_t bstride, vint8 // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) { - return vsse8_v_i8m1_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_i8m1_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_i8m2_m( @@ -174,7 +174,7 @@ void test_vsse8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1 // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) { - return vsse8_v_i8m2_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_i8m2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_i8m4_m( @@ -183,7 +183,7 @@ void test_vsse8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2 // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) { - return vsse8_v_i8m4_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_i8m4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_i8m8_m( @@ -192,7 +192,7 @@ void test_vsse8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4 // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m8_m(vbool1_t mask, int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) { - return vsse8_v_i8m8_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_i8m8_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_u8mf8_m( @@ -201,7 +201,7 @@ void test_vsse8_v_i8m8_m(vbool1_t mask, int8_t *base, ptrdiff_t bstride, vint8m8 // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) { - return vsse8_v_u8mf8_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_u8mf8_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_u8mf4_m( @@ -210,7 +210,7 @@ void 
test_vsse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuin // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) { - return vsse8_v_u8mf4_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_u8mf4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_u8mf2_m( @@ -219,7 +219,7 @@ void test_vsse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuin // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) { - return vsse8_v_u8mf2_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_u8mf2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_u8m1_m( @@ -228,7 +228,7 @@ void test_vsse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuin // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) { - return vsse8_v_u8m1_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_u8m1_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_u8m2_m( @@ -237,7 +237,7 @@ void test_vsse8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) { - return vsse8_v_u8m2_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_u8m2_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: @test_vsse8_v_u8m4_m( @@ -246,7 +246,7 @@ void test_vsse8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) { - return vsse8_v_u8m4_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_u8m4_m(mask, base, bstride, value, vl); } // CHECK-RV64-LABEL: 
@test_vsse8_v_u8m8_m( @@ -255,6 +255,6 @@ void test_vsse8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m8_m(vbool1_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) { - return vsse8_v_u8m8_m(mask, base, bstride, value, vl); + return __riscv_vsse8_v_u8m8_m(mask, base, bstride, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e16.c index 47817c48c9c8..e3e77def9ef4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsseg2e16_v_f16mf4(base, v0, v1, vl); + return __riscv_vsseg2e16_v_f16mf4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsseg2e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsseg2e16_v_f16mf2(base, v0, v1, vl); + return __riscv_vsseg2e16_v_f16mf2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsseg2e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsseg2e16_v_f16m1(base, v0, v1, vl); + return __riscv_vsseg2e16_v_f16m1(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2( @@ -40,7 +40,7 @@ void test_vsseg2e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, si // CHECK-RV64-NEXT: ret void // void 
test_vsseg2e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsseg2e16_v_f16m2(base, v0, v1, vl); + return __riscv_vsseg2e16_v_f16m2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4( @@ -49,7 +49,7 @@ void test_vsseg2e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, si // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_f16m4(_Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsseg2e16_v_f16m4(base, v0, v1, vl); + return __riscv_vsseg2e16_v_f16m4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16mf4( @@ -58,7 +58,7 @@ void test_vsseg2e16_v_f16m4(_Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, si // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsseg2e16_v_i16mf4(base, v0, v1, vl); + return __riscv_vsseg2e16_v_i16mf4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16mf2( @@ -67,7 +67,7 @@ void test_vsseg2e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsseg2e16_v_i16mf2(base, v0, v1, vl); + return __riscv_vsseg2e16_v_i16mf2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m1( @@ -76,7 +76,7 @@ void test_vsseg2e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsseg2e16_v_i16m1(base, v0, v1, vl); + return __riscv_vsseg2e16_v_i16m1(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m2( @@ -85,7 +85,7 @@ void test_vsseg2e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, size_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsseg2e16_v_i16m2(base, v0, v1, vl); + return 
__riscv_vsseg2e16_v_i16m2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m4( @@ -94,7 +94,7 @@ void test_vsseg2e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, size_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_i16m4(int16_t *base, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsseg2e16_v_i16m4(base, v0, v1, vl); + return __riscv_vsseg2e16_v_i16m4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16mf4( @@ -103,7 +103,7 @@ void test_vsseg2e16_v_i16m4(int16_t *base, vint16m4_t v0, vint16m4_t v1, size_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsseg2e16_v_u16mf4(base, v0, v1, vl); + return __riscv_vsseg2e16_v_u16mf4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16mf2( @@ -112,7 +112,7 @@ void test_vsseg2e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, s // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsseg2e16_v_u16mf2(base, v0, v1, vl); + return __riscv_vsseg2e16_v_u16mf2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m1( @@ -121,7 +121,7 @@ void test_vsseg2e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, s // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsseg2e16_v_u16m1(base, v0, v1, vl); + return __riscv_vsseg2e16_v_u16m1(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m2( @@ -130,7 +130,7 @@ void test_vsseg2e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsseg2e16_v_u16m2(base, v0, v1, vl); + return __riscv_vsseg2e16_v_u16m2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m4( @@ -139,7 +139,7 @@ void 
test_vsseg2e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_u16m4(uint16_t *base, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsseg2e16_v_u16m4(base, v0, v1, vl); + return __riscv_vsseg2e16_v_u16m4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf4_m( @@ -148,7 +148,7 @@ void test_vsseg2e16_v_u16m4(uint16_t *base, vuint16m4_t v0, vuint16m4_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsseg2e16_v_f16mf4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_f16mf4_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2_m( @@ -157,7 +157,7 @@ void test_vsseg2e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsseg2e16_v_f16mf2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_f16mf2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1_m( @@ -166,7 +166,7 @@ void test_vsseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsseg2e16_v_f16m1_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_f16m1_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2_m( @@ -175,7 +175,7 @@ void test_vsseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsseg2e16_v_f16m2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_f16m2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4_m( @@ -184,7 
+184,7 @@ void test_vsseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsseg2e16_v_f16m4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_f16m4_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16mf4_m( @@ -193,7 +193,7 @@ void test_vsseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, vfloat16m4_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsseg2e16_v_i16mf4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_i16mf4_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16mf2_m( @@ -202,7 +202,7 @@ void test_vsseg2e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsseg2e16_v_i16mf2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_i16mf2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m1_m( @@ -211,7 +211,7 @@ void test_vsseg2e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsseg2e16_v_i16m1_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_i16m1_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_i16m2_m( @@ -220,7 +220,7 @@ void test_vsseg2e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsseg2e16_v_i16m2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_i16m2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: 
@test_vsseg2e16_v_i16m4_m( @@ -229,7 +229,7 @@ void test_vsseg2e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint1 // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsseg2e16_v_i16m4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_i16m4_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16mf4_m( @@ -238,7 +238,7 @@ void test_vsseg2e16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t v0, vint1 // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsseg2e16_v_u16mf4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_u16mf4_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16mf2_m( @@ -247,7 +247,7 @@ void test_vsseg2e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsseg2e16_v_u16mf2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_u16mf2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m1_m( @@ -256,7 +256,7 @@ void test_vsseg2e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsseg2e16_v_u16m1_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_u16m1_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m2_m( @@ -265,7 +265,7 @@ void test_vsseg2e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsseg2e16_v_u16m2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_u16m2_m(mask, base, 
v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e16_v_u16m4_m( @@ -274,6 +274,6 @@ void test_vsseg2e16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t v0, vui // CHECK-RV64-NEXT: ret void // void test_vsseg2e16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsseg2e16_v_u16m4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e16_v_u16m4_m(mask, base, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e32.c index a1cbd551e200..6d73df879ca7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsseg2e32_v_f32mf2(base, v0, v1, vl); + return __riscv_vsseg2e32_v_f32mf2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m1( @@ -22,7 +22,7 @@ void test_vsseg2e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, si // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsseg2e32_v_f32m1(base, v0, v1, vl); + return __riscv_vsseg2e32_v_f32m1(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m2( @@ -31,7 +31,7 @@ void test_vsseg2e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, size_ // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsseg2e32_v_f32m2(base, v0, v1, vl); + return __riscv_vsseg2e32_v_f32m2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m4( @@ -40,7 +40,7 @@ void test_vsseg2e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, size_ // CHECK-RV64-NEXT: 
ret void // void test_vsseg2e32_v_f32m4(float *base, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsseg2e32_v_f32m4(base, v0, v1, vl); + return __riscv_vsseg2e32_v_f32m4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32mf2( @@ -49,7 +49,7 @@ void test_vsseg2e32_v_f32m4(float *base, vfloat32m4_t v0, vfloat32m4_t v1, size_ // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsseg2e32_v_i32mf2(base, v0, v1, vl); + return __riscv_vsseg2e32_v_i32mf2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m1( @@ -58,7 +58,7 @@ void test_vsseg2e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsseg2e32_v_i32m1(base, v0, v1, vl); + return __riscv_vsseg2e32_v_i32m1(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m2( @@ -67,7 +67,7 @@ void test_vsseg2e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, size_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsseg2e32_v_i32m2(base, v0, v1, vl); + return __riscv_vsseg2e32_v_i32m2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m4( @@ -76,7 +76,7 @@ void test_vsseg2e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, size_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_i32m4(int32_t *base, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsseg2e32_v_i32m4(base, v0, v1, vl); + return __riscv_vsseg2e32_v_i32m4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_u32mf2( @@ -85,7 +85,7 @@ void test_vsseg2e32_v_i32m4(int32_t *base, vint32m4_t v0, vint32m4_t v1, size_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsseg2e32_v_u32mf2(base, v0, v1, vl); + return 
__riscv_vsseg2e32_v_u32mf2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m1( @@ -94,7 +94,7 @@ void test_vsseg2e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, s // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsseg2e32_v_u32m1(base, v0, v1, vl); + return __riscv_vsseg2e32_v_u32m1(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m2( @@ -103,7 +103,7 @@ void test_vsseg2e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsseg2e32_v_u32m2(base, v0, v1, vl); + return __riscv_vsseg2e32_v_u32m2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m4( @@ -112,7 +112,7 @@ void test_vsseg2e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_u32m4(uint32_t *base, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsseg2e32_v_u32m4(base, v0, v1, vl); + return __riscv_vsseg2e32_v_u32m4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32mf2_m( @@ -121,7 +121,7 @@ void test_vsseg2e32_v_u32m4(uint32_t *base, vuint32m4_t v0, vuint32m4_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsseg2e32_v_f32mf2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e32_v_f32mf2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m1_m( @@ -130,7 +130,7 @@ void test_vsseg2e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsseg2e32_v_f32m1_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e32_v_f32m1_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: 
@test_vsseg2e32_v_f32m2_m( @@ -139,7 +139,7 @@ void test_vsseg2e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsseg2e32_v_f32m2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e32_v_f32m2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32m4_m( @@ -148,7 +148,7 @@ void test_vsseg2e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsseg2e32_v_f32m4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e32_v_f32m4_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32mf2_m( @@ -157,7 +157,7 @@ void test_vsseg2e32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t v0, vfloa // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsseg2e32_v_i32mf2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e32_v_i32mf2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m1_m( @@ -166,7 +166,7 @@ void test_vsseg2e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsseg2e32_v_i32m1_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e32_v_i32m1_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m2_m( @@ -175,7 +175,7 @@ void test_vsseg2e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsseg2e32_v_i32m2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e32_v_i32m2_m(mask, base, v0, v1, 
vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_i32m4_m( @@ -184,7 +184,7 @@ void test_vsseg2e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsseg2e32_v_i32m4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e32_v_i32m4_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_u32mf2_m( @@ -193,7 +193,7 @@ void test_vsseg2e32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t v0, vint3 // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsseg2e32_v_u32mf2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e32_v_u32mf2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m1_m( @@ -202,7 +202,7 @@ void test_vsseg2e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsseg2e32_v_u32m1_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e32_v_u32m1_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m2_m( @@ -211,7 +211,7 @@ void test_vsseg2e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsseg2e32_v_u32m2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e32_v_u32m2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e32_v_u32m4_m( @@ -220,6 +220,6 @@ void test_vsseg2e32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsseg2e32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsseg2e32_v_u32m4_m(mask, base, v0, v1, vl); + return 
__riscv_vsseg2e32_v_u32m4_m(mask, base, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e64.c index bfeda3e99b88..ae8f7cf5f80e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsseg2e64_v_f64m1(base, v0, v1, vl); + return __riscv_vsseg2e64_v_f64m1(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m2( @@ -22,7 +22,7 @@ void test_vsseg2e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsseg2e64_v_f64m2(base, v0, v1, vl); + return __riscv_vsseg2e64_v_f64m2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m4( @@ -31,7 +31,7 @@ void test_vsseg2e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_f64m4(double *base, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsseg2e64_v_f64m4(base, v0, v1, vl); + return __riscv_vsseg2e64_v_f64m4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m1( @@ -40,7 +40,7 @@ void test_vsseg2e64_v_f64m4(double *base, vfloat64m4_t v0, vfloat64m4_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsseg2e64_v_i64m1(base, v0, v1, vl); + return __riscv_vsseg2e64_v_i64m1(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m2( @@ -49,7 +49,7 @@ void test_vsseg2e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, size_t // CHECK-RV64-NEXT: ret 
void // void test_vsseg2e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsseg2e64_v_i64m2(base, v0, v1, vl); + return __riscv_vsseg2e64_v_i64m2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m4( @@ -58,7 +58,7 @@ void test_vsseg2e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, size_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_i64m4(int64_t *base, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsseg2e64_v_i64m4(base, v0, v1, vl); + return __riscv_vsseg2e64_v_i64m4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m1( @@ -67,7 +67,7 @@ void test_vsseg2e64_v_i64m4(int64_t *base, vint64m4_t v0, vint64m4_t v1, size_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsseg2e64_v_u64m1(base, v0, v1, vl); + return __riscv_vsseg2e64_v_u64m1(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m2( @@ -76,7 +76,7 @@ void test_vsseg2e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsseg2e64_v_u64m2(base, v0, v1, vl); + return __riscv_vsseg2e64_v_u64m2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m4( @@ -85,7 +85,7 @@ void test_vsseg2e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_u64m4(uint64_t *base, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsseg2e64_v_u64m4(base, v0, v1, vl); + return __riscv_vsseg2e64_v_u64m4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m1_m( @@ -94,7 +94,7 @@ void test_vsseg2e64_v_u64m4(uint64_t *base, vuint64m4_t v0, vuint64m4_t v1, size // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsseg2e64_v_f64m1_m(mask, base, v0, v1, 
vl); + return __riscv_vsseg2e64_v_f64m1_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m2_m( @@ -103,7 +103,7 @@ void test_vsseg2e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsseg2e64_v_f64m2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e64_v_f64m2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_f64m4_m( @@ -112,7 +112,7 @@ void test_vsseg2e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfl // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsseg2e64_v_f64m4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e64_v_f64m4_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m1_m( @@ -121,7 +121,7 @@ void test_vsseg2e64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t v0, vfl // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsseg2e64_v_i64m1_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e64_v_i64m1_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m2_m( @@ -130,7 +130,7 @@ void test_vsseg2e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsseg2e64_v_i64m2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e64_v_i64m2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_i64m4_m( @@ -139,7 +139,7 @@ void test_vsseg2e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return 
vsseg2e64_v_i64m4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e64_v_i64m4_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m1_m( @@ -148,7 +148,7 @@ void test_vsseg2e64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsseg2e64_v_u64m1_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e64_v_u64m1_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m2_m( @@ -157,7 +157,7 @@ void test_vsseg2e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsseg2e64_v_u64m2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e64_v_u64m2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e64_v_u64m4_m( @@ -166,6 +166,6 @@ void test_vsseg2e64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsseg2e64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsseg2e64_v_u64m4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e64_v_u64m4_m(mask, base, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e8.c index e26d3a38dbe8..df72bd6b2499 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg2e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsseg2e8_v_i8mf8(base, v0, v1, vl); + return __riscv_vsseg2e8_v_i8mf8(base, v0, v1, vl); } // 
CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vsseg2e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, size_t vl // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsseg2e8_v_i8mf4(base, v0, v1, vl); + return __riscv_vsseg2e8_v_i8mf4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vsseg2e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, size_t vl // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsseg2e8_v_i8mf2(base, v0, v1, vl); + return __riscv_vsseg2e8_v_i8mf2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vsseg2e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, size_t vl // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsseg2e8_v_i8m1(base, v0, v1, vl); + return __riscv_vsseg2e8_v_i8m1(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m2( @@ -48,7 +48,7 @@ void test_vsseg2e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsseg2e8_v_i8m2(base, v0, v1, vl); + return __riscv_vsseg2e8_v_i8m2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m4( @@ -57,7 +57,7 @@ void test_vsseg2e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_i8m4(int8_t *base, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsseg2e8_v_i8m4(base, v0, v1, vl); + return __riscv_vsseg2e8_v_i8m4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf8( @@ -66,7 +66,7 @@ void test_vsseg2e8_v_i8m4(int8_t *base, vint8m4_t v0, vint8m4_t v1, size_t vl) { // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_u8mf8(uint8_t *base, 
vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsseg2e8_v_u8mf8(base, v0, v1, vl); + return __riscv_vsseg2e8_v_u8mf8(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf4( @@ -75,7 +75,7 @@ void test_vsseg2e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, size_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsseg2e8_v_u8mf4(base, v0, v1, vl); + return __riscv_vsseg2e8_v_u8mf4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf2( @@ -84,7 +84,7 @@ void test_vsseg2e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, size_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsseg2e8_v_u8mf2(base, v0, v1, vl); + return __riscv_vsseg2e8_v_u8mf2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m1( @@ -93,7 +93,7 @@ void test_vsseg2e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, size_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsseg2e8_v_u8m1(base, v0, v1, vl); + return __riscv_vsseg2e8_v_u8m1(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m2( @@ -102,7 +102,7 @@ void test_vsseg2e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, size_t vl // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsseg2e8_v_u8m2(base, v0, v1, vl); + return __riscv_vsseg2e8_v_u8m2(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m4( @@ -111,7 +111,7 @@ void test_vsseg2e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, size_t vl // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_u8m4(uint8_t *base, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsseg2e8_v_u8m4(base, v0, v1, vl); + return __riscv_vsseg2e8_v_u8m4(base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf8_m( @@ 
-120,7 +120,7 @@ void test_vsseg2e8_v_u8m4(uint8_t *base, vuint8m4_t v0, vuint8m4_t v1, size_t vl // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsseg2e8_v_i8mf8_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e8_v_i8mf8_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf4_m( @@ -129,7 +129,7 @@ void test_vsseg2e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsseg2e8_v_i8mf4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e8_v_i8mf4_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8mf2_m( @@ -138,7 +138,7 @@ void test_vsseg2e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsseg2e8_v_i8mf2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e8_v_i8mf2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m1_m( @@ -147,7 +147,7 @@ void test_vsseg2e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsseg2e8_v_i8m1_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e8_v_i8m1_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m2_m( @@ -156,7 +156,7 @@ void test_vsseg2e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsseg2e8_v_i8m2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e8_v_i8m2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_i8m4_m( @@ -165,7 +165,7 @@ void 
test_vsseg2e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsseg2e8_v_i8m4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e8_v_i8m4_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf8_m( @@ -174,7 +174,7 @@ void test_vsseg2e8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t v0, vint8m4_t // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsseg2e8_v_u8mf8_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e8_v_u8mf8_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf4_m( @@ -183,7 +183,7 @@ void test_vsseg2e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsseg2e8_v_u8mf4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e8_v_u8mf4_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8mf2_m( @@ -192,7 +192,7 @@ void test_vsseg2e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsseg2e8_v_u8mf2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e8_v_u8mf2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m1_m( @@ -201,7 +201,7 @@ void test_vsseg2e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsseg2e8_v_u8m1_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e8_v_u8m1_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m2_m( @@ -210,7 +210,7 @@ void 
test_vsseg2e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsseg2e8_v_u8m2_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e8_v_u8m2_m(mask, base, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsseg2e8_v_u8m4_m( @@ -219,6 +219,6 @@ void test_vsseg2e8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m // CHECK-RV64-NEXT: ret void // void test_vsseg2e8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsseg2e8_v_u8m4_m(mask, base, v0, v1, vl); + return __riscv_vsseg2e8_v_u8m4_m(mask, base, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e16.c index 35b44fd7f2a3..6e68760fb1fe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsseg3e16_v_f16mf4(base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_f16mf4(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsseg3e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsseg3e16_v_f16mf2(base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_f16mf2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsseg3e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, // CHECK-RV64-NEXT: ret 
void // void test_vsseg3e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsseg3e16_v_f16m1(base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_f16m1(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2( @@ -40,7 +40,7 @@ void test_vsseg3e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsseg3e16_v_f16m2(base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_f16m2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16mf4( @@ -49,7 +49,7 @@ void test_vsseg3e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsseg3e16_v_i16mf4(base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_i16mf4(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16mf2( @@ -58,7 +58,7 @@ void test_vsseg3e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsseg3e16_v_i16mf2(base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_i16mf2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16m1( @@ -67,7 +67,7 @@ void test_vsseg3e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsseg3e16_v_i16m1(base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_i16m1(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16m2( @@ -76,7 +76,7 @@ void test_vsseg3e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m // CHECK-RV64-NEXT: ret void // void 
test_vsseg3e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsseg3e16_v_i16m2(base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_i16m2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16mf4( @@ -85,7 +85,7 @@ void test_vsseg3e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsseg3e16_v_u16mf4(base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_u16mf4(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16mf2( @@ -94,7 +94,7 @@ void test_vsseg3e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsseg3e16_v_u16mf2(base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_u16mf2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16m1( @@ -103,7 +103,7 @@ void test_vsseg3e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsseg3e16_v_u16m1(base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_u16m1(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16m2( @@ -112,7 +112,7 @@ void test_vsseg3e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsseg3e16_v_u16m2(base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_u16m2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4_m( @@ -121,7 +121,7 @@ void test_vsseg3e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuin // CHECK-RV64-NEXT: ret void // void 
test_vsseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsseg3e16_v_f16mf4_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_f16mf4_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2_m( @@ -130,7 +130,7 @@ void test_vsseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsseg3e16_v_f16mf2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_f16mf2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1_m( @@ -139,7 +139,7 @@ void test_vsseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsseg3e16_v_f16m1_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_f16m1_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2_m( @@ -148,7 +148,7 @@ void test_vsseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsseg3e16_v_f16m2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_f16m2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16mf4_m( @@ -157,7 +157,7 @@ void test_vsseg3e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsseg3e16_v_i16mf4_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_i16mf4_m(mask, base, v0, v1, v2, vl); } // 
CHECK-RV64-LABEL: @test_vsseg3e16_v_i16mf2_m( @@ -166,7 +166,7 @@ void test_vsseg3e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsseg3e16_v_i16mf2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_i16mf2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16m1_m( @@ -175,7 +175,7 @@ void test_vsseg3e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsseg3e16_v_i16m1_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_i16m1_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_i16m2_m( @@ -184,7 +184,7 @@ void test_vsseg3e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsseg3e16_v_i16m2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_i16m2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16mf4_m( @@ -193,7 +193,7 @@ void test_vsseg3e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint1 // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsseg3e16_v_u16mf4_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_u16mf4_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16mf2_m( @@ -202,7 +202,7 @@ void test_vsseg3e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, 
vuint16mf2_t v2, size_t vl) { - return vsseg3e16_v_u16mf2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_u16mf2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16m1_m( @@ -211,7 +211,7 @@ void test_vsseg3e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsseg3e16_v_u16m1_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_u16m1_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e16_v_u16m2_m( @@ -220,6 +220,6 @@ void test_vsseg3e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsseg3e16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsseg3e16_v_u16m2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e16_v_u16m2_m(mask, base, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e32.c index 6800c19fef41..bc00c38fb9a4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsseg3e32_v_f32mf2(base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_f32mf2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_f32m1( @@ -22,7 +22,7 @@ void test_vsseg3e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t 
vl) { - return vsseg3e32_v_f32m1(base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_f32m1(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_f32m2( @@ -31,7 +31,7 @@ void test_vsseg3e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsseg3e32_v_f32m2(base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_f32m2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_i32mf2( @@ -40,7 +40,7 @@ void test_vsseg3e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsseg3e32_v_i32mf2(base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_i32mf2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_i32m1( @@ -49,7 +49,7 @@ void test_vsseg3e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsseg3e32_v_i32m1(base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_i32m1(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_i32m2( @@ -58,7 +58,7 @@ void test_vsseg3e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsseg3e32_v_i32m2(base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_i32m2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_u32mf2( @@ -67,7 +67,7 @@ void test_vsseg3e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsseg3e32_v_u32mf2(base, 
v0, v1, v2, vl); + return __riscv_vsseg3e32_v_u32mf2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_u32m1( @@ -76,7 +76,7 @@ void test_vsseg3e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsseg3e32_v_u32m1(base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_u32m1(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_u32m2( @@ -85,7 +85,7 @@ void test_vsseg3e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsseg3e32_v_u32m2(base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_u32m2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_f32mf2_m( @@ -94,7 +94,7 @@ void test_vsseg3e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsseg3e32_v_f32mf2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_f32mf2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_f32m1_m( @@ -103,7 +103,7 @@ void test_vsseg3e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsseg3e32_v_f32m1_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_f32m1_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_f32m2_m( @@ -112,7 +112,7 @@ void test_vsseg3e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t 
v1, vfloat32m2_t v2, size_t vl) { - return vsseg3e32_v_f32m2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_f32m2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_i32mf2_m( @@ -121,7 +121,7 @@ void test_vsseg3e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsseg3e32_v_i32mf2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_i32mf2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_i32m1_m( @@ -130,7 +130,7 @@ void test_vsseg3e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsseg3e32_v_i32m1_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_i32m1_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_i32m2_m( @@ -139,7 +139,7 @@ void test_vsseg3e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsseg3e32_v_i32m2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_i32m2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_u32mf2_m( @@ -148,7 +148,7 @@ void test_vsseg3e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsseg3e32_v_u32mf2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_u32mf2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_u32m1_m( @@ -157,7 +157,7 @@ void test_vsseg3e32_v_u32mf2_m(vbool64_t mask, 
uint32_t *base, vuint32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsseg3e32_v_u32m1_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_u32m1_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e32_v_u32m2_m( @@ -166,6 +166,6 @@ void test_vsseg3e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsseg3e32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsseg3e32_v_u32m2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e32_v_u32m2_m(mask, base, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e64.c index b0ef22f68790..9ea648556c93 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg3e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsseg3e64_v_f64m1(base, v0, v1, v2, vl); + return __riscv_vsseg3e64_v_f64m1(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e64_v_f64m2( @@ -22,7 +22,7 @@ void test_vsseg3e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg3e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsseg3e64_v_f64m2(base, v0, v1, v2, vl); + return __riscv_vsseg3e64_v_f64m2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e64_v_i64m1( @@ -31,7 +31,7 @@ void test_vsseg3e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, vflo // 
CHECK-RV64-NEXT: ret void // void test_vsseg3e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsseg3e64_v_i64m1(base, v0, v1, v2, vl); + return __riscv_vsseg3e64_v_i64m1(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e64_v_i64m2( @@ -40,7 +40,7 @@ void test_vsseg3e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m // CHECK-RV64-NEXT: ret void // void test_vsseg3e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsseg3e64_v_i64m2(base, v0, v1, v2, vl); + return __riscv_vsseg3e64_v_i64m2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e64_v_u64m1( @@ -49,7 +49,7 @@ void test_vsseg3e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m // CHECK-RV64-NEXT: ret void // void test_vsseg3e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsseg3e64_v_u64m1(base, v0, v1, v2, vl); + return __riscv_vsseg3e64_v_u64m1(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e64_v_u64m2( @@ -58,7 +58,7 @@ void test_vsseg3e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg3e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsseg3e64_v_u64m2(base, v0, v1, v2, vl); + return __riscv_vsseg3e64_v_u64m2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e64_v_f64m1_m( @@ -67,7 +67,7 @@ void test_vsseg3e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg3e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsseg3e64_v_f64m1_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e64_v_f64m1_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e64_v_f64m2_m( @@ -76,7 +76,7 @@ void test_vsseg3e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl // 
CHECK-RV64-NEXT: ret void // void test_vsseg3e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsseg3e64_v_f64m2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e64_v_f64m2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e64_v_i64m1_m( @@ -85,7 +85,7 @@ void test_vsseg3e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfl // CHECK-RV64-NEXT: ret void // void test_vsseg3e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsseg3e64_v_i64m1_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e64_v_i64m1_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e64_v_i64m2_m( @@ -94,7 +94,7 @@ void test_vsseg3e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg3e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsseg3e64_v_i64m2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e64_v_i64m2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e64_v_u64m1_m( @@ -103,7 +103,7 @@ void test_vsseg3e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg3e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsseg3e64_v_u64m1_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e64_v_u64m1_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e64_v_u64m2_m( @@ -112,6 +112,6 @@ void test_vsseg3e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsseg3e64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsseg3e64_v_u64m2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e64_v_u64m2_m(mask, base, v0, v1, v2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e8.c index 8da5fbbc347c..790eea45a778 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg3e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsseg3e8_v_i8mf8(base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_i8mf8(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vsseg3e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsseg3e8_v_i8mf4(base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_i8mf4(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vsseg3e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsseg3e8_v_i8mf2(base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_i8mf2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vsseg3e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsseg3e8_v_i8m1(base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_i8m1(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8m2( @@ -48,7 +48,7 @@ void test_vsseg3e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2 // CHECK-RV64-NEXT: ret void // void 
test_vsseg3e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsseg3e8_v_i8m2(base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_i8m2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf8( @@ -57,7 +57,7 @@ void test_vsseg3e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2 // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsseg3e8_v_u8mf8(base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_u8mf8(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf4( @@ -66,7 +66,7 @@ void test_vsseg3e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsseg3e8_v_u8mf4(base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_u8mf4(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf2( @@ -75,7 +75,7 @@ void test_vsseg3e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsseg3e8_v_u8mf2(base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_u8mf2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8m1( @@ -84,7 +84,7 @@ void test_vsseg3e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsseg3e8_v_u8m1(base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_u8m1(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8m2( @@ -93,7 +93,7 @@ void test_vsseg3e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_ // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, 
size_t vl) { - return vsseg3e8_v_u8m2(base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_u8m2(base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf8_m( @@ -102,7 +102,7 @@ void test_vsseg3e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_ // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsseg3e8_v_i8mf8_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_i8mf8_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf4_m( @@ -111,7 +111,7 @@ void test_vsseg3e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsseg3e8_v_i8mf4_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_i8mf4_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8mf2_m( @@ -120,7 +120,7 @@ void test_vsseg3e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsseg3e8_v_i8mf2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_i8mf2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8m1_m( @@ -129,7 +129,7 @@ void test_vsseg3e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsseg3e8_v_i8m1_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_i8m1_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_i8m2_m( @@ -138,7 +138,7 @@ void test_vsseg3e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t // CHECK-RV64-NEXT: ret void // void 
test_vsseg3e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsseg3e8_v_i8m2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_i8m2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf8_m( @@ -147,7 +147,7 @@ void test_vsseg3e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsseg3e8_v_u8mf8_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_u8mf8_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf4_m( @@ -156,7 +156,7 @@ void test_vsseg3e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsseg3e8_v_u8mf4_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_u8mf4_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8mf2_m( @@ -165,7 +165,7 @@ void test_vsseg3e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsseg3e8_v_u8mf2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_u8mf2_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8m1_m( @@ -174,7 +174,7 @@ void test_vsseg3e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsseg3e8_v_u8m1_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_u8m1_m(mask, base, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsseg3e8_v_u8m2_m( @@ -183,6 +183,6 @@ void 
test_vsseg3e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m // CHECK-RV64-NEXT: ret void // void test_vsseg3e8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsseg3e8_v_u8m2_m(mask, base, v0, v1, v2, vl); + return __riscv_vsseg3e8_v_u8m2_m(mask, base, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e16.c index e338b8039445..50358c71974f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsseg4e16_v_f16mf4(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_f16mf4(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsseg4e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsseg4e16_v_f16mf2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_f16mf2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsseg4e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsseg4e16_v_f16m1(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_f16m1(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2( @@ -40,7 +40,7 @@ void 
test_vsseg4e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsseg4e16_v_f16m2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_f16m2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16mf4( @@ -49,7 +49,7 @@ void test_vsseg4e16_v_f16m2(_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsseg4e16_v_i16mf4(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_i16mf4(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16mf2( @@ -58,7 +58,7 @@ void test_vsseg4e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsseg4e16_v_i16mf2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_i16mf2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16m1( @@ -67,7 +67,7 @@ void test_vsseg4e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsseg4e16_v_i16m1(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_i16m1(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16m2( @@ -76,7 +76,7 @@ void test_vsseg4e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsseg4e16_v_i16m2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_i16m2(base, v0, v1, v2, 
v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16mf4( @@ -85,7 +85,7 @@ void test_vsseg4e16_v_i16m2(int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsseg4e16_v_u16mf4(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_u16mf4(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16mf2( @@ -94,7 +94,7 @@ void test_vsseg4e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsseg4e16_v_u16mf2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_u16mf2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16m1( @@ -103,7 +103,7 @@ void test_vsseg4e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsseg4e16_v_u16m1(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_u16m1(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16m2( @@ -112,7 +112,7 @@ void test_vsseg4e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsseg4e16_v_u16m2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_u16m2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf4_m( @@ -121,7 +121,7 @@ void test_vsseg4e16_v_u16m2(uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, 
vfloat16mf4_t v3, size_t vl) { - return vsseg4e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2_m( @@ -130,7 +130,7 @@ void test_vsseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsseg4e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1_m( @@ -139,7 +139,7 @@ void test_vsseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsseg4e16_v_f16m1_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_f16m1_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2_m( @@ -148,7 +148,7 @@ void test_vsseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsseg4e16_v_f16m2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_f16m2_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16mf4_m( @@ -157,7 +157,7 @@ void test_vsseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsseg4e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, 
vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16mf2_m( @@ -166,7 +166,7 @@ void test_vsseg4e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsseg4e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16m1_m( @@ -175,7 +175,7 @@ void test_vsseg4e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsseg4e16_v_i16m1_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_i16m1_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_i16m2_m( @@ -184,7 +184,7 @@ void test_vsseg4e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsseg4e16_v_i16m2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_i16m2_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16mf4_m( @@ -193,7 +193,7 @@ void test_vsseg4e16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t v0, vint1 // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsseg4e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16mf2_m( @@ -202,7 +202,7 @@ void test_vsseg4e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, // CHECK-RV64-NEXT: ret 
void // void test_vsseg4e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsseg4e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16m1_m( @@ -211,7 +211,7 @@ void test_vsseg4e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsseg4e16_v_u16m1_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_u16m1_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e16_v_u16m2_m( @@ -220,6 +220,6 @@ void test_vsseg4e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsseg4e16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsseg4e16_v_u16m2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e16_v_u16m2_m(mask, base, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e32.c index 1af08d387eea..e06169286a5b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsseg4e32_v_f32mf2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_f32mf2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_f32m1( @@ -22,7 +22,7 @@ void 
test_vsseg4e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsseg4e32_v_f32m1(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_f32m1(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_f32m2( @@ -31,7 +31,7 @@ void test_vsseg4e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsseg4e32_v_f32m2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_f32m2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_i32mf2( @@ -40,7 +40,7 @@ void test_vsseg4e32_v_f32m2(float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsseg4e32_v_i32mf2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_i32mf2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_i32m1( @@ -49,7 +49,7 @@ void test_vsseg4e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsseg4e32_v_i32m1(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_i32m1(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_i32m2( @@ -58,7 +58,7 @@ void test_vsseg4e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsseg4e32_v_i32m2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_i32m2(base, v0, v1, v2, v3, 
vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_u32mf2( @@ -67,7 +67,7 @@ void test_vsseg4e32_v_i32m2(int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsseg4e32_v_u32mf2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_u32mf2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_u32m1( @@ -76,7 +76,7 @@ void test_vsseg4e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsseg4e32_v_u32m1(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_u32m1(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_u32m2( @@ -85,7 +85,7 @@ void test_vsseg4e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsseg4e32_v_u32m2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_u32m2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_f32mf2_m( @@ -94,7 +94,7 @@ void test_vsseg4e32_v_u32m2(uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsseg4e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_f32m1_m( @@ -103,7 +103,7 @@ void test_vsseg4e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, 
vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsseg4e32_v_f32m1_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_f32m1_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_f32m2_m( @@ -112,7 +112,7 @@ void test_vsseg4e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsseg4e32_v_f32m2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_f32m2_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_i32mf2_m( @@ -121,7 +121,7 @@ void test_vsseg4e32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t v0, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsseg4e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_i32m1_m( @@ -130,7 +130,7 @@ void test_vsseg4e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsseg4e32_v_i32m1_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_i32m1_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_i32m2_m( @@ -139,7 +139,7 @@ void test_vsseg4e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsseg4e32_v_i32m2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_i32m2_m(mask, base, v0, v1, v2, v3, vl); } // 
CHECK-RV64-LABEL: @test_vsseg4e32_v_u32mf2_m( @@ -148,7 +148,7 @@ void test_vsseg4e32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsseg4e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_u32m1_m( @@ -157,7 +157,7 @@ void test_vsseg4e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsseg4e32_v_u32m1_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_u32m1_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e32_v_u32m2_m( @@ -166,6 +166,6 @@ void test_vsseg4e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsseg4e32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsseg4e32_v_u32m2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e32_v_u32m2_m(mask, base, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e64.c index 153f2ff7c12d..a904803a34d4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg4e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return 
vsseg4e64_v_f64m1(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e64_v_f64m1(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e64_v_f64m2( @@ -22,7 +22,7 @@ void test_vsseg4e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg4e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsseg4e64_v_f64m2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e64_v_f64m2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e64_v_i64m1( @@ -31,7 +31,7 @@ void test_vsseg4e64_v_f64m2(double *base, vfloat64m2_t v0, vfloat64m2_t v1, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg4e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsseg4e64_v_i64m1(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e64_v_i64m1(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e64_v_i64m2( @@ -40,7 +40,7 @@ void test_vsseg4e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m // CHECK-RV64-NEXT: ret void // void test_vsseg4e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsseg4e64_v_i64m2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e64_v_i64m2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e64_v_u64m1( @@ -49,7 +49,7 @@ void test_vsseg4e64_v_i64m2(int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m // CHECK-RV64-NEXT: ret void // void test_vsseg4e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsseg4e64_v_u64m1(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e64_v_u64m1(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e64_v_u64m2( @@ -58,7 +58,7 @@ void test_vsseg4e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg4e64_v_u64m2(uint64_t *base, vuint64m2_t 
v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsseg4e64_v_u64m2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e64_v_u64m2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e64_v_f64m1_m( @@ -67,7 +67,7 @@ void test_vsseg4e64_v_u64m2(uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg4e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsseg4e64_v_f64m1_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e64_v_f64m1_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e64_v_f64m2_m( @@ -76,7 +76,7 @@ void test_vsseg4e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl // CHECK-RV64-NEXT: ret void // void test_vsseg4e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsseg4e64_v_f64m2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e64_v_f64m2_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e64_v_i64m1_m( @@ -85,7 +85,7 @@ void test_vsseg4e64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t v0, vfl // CHECK-RV64-NEXT: ret void // void test_vsseg4e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsseg4e64_v_i64m1_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e64_v_i64m1_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e64_v_i64m2_m( @@ -94,7 +94,7 @@ void test_vsseg4e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg4e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsseg4e64_v_i64m2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e64_v_i64m2_m(mask, base, v0, v1, v2, v3, vl); } // 
CHECK-RV64-LABEL: @test_vsseg4e64_v_u64m1_m( @@ -103,7 +103,7 @@ void test_vsseg4e64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg4e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsseg4e64_v_u64m1_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e64_v_u64m1_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e64_v_u64m2_m( @@ -112,6 +112,6 @@ void test_vsseg4e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsseg4e64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsseg4e64_v_u64m2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e64_v_u64m2_m(mask, base, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e8.c index 4a860b14dd43..c6906479f412 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg4e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsseg4e8_v_i8mf8(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_i8mf8(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vsseg4e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsseg4e8_v_i8mf4(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_i8mf4(base, 
v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vsseg4e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsseg4e8_v_i8mf2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_i8mf2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vsseg4e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsseg4e8_v_i8m1(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_i8m1(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8m2( @@ -48,7 +48,7 @@ void test_vsseg4e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2 // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsseg4e8_v_i8m2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_i8m2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf8( @@ -57,7 +57,7 @@ void test_vsseg4e8_v_i8m2(int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2 // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsseg4e8_v_u8mf8(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_u8mf8(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf4( @@ -66,7 +66,7 @@ void test_vsseg4e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsseg4e8_v_u8mf4(base, v0, v1, v2, v3, vl); + return 
__riscv_vsseg4e8_v_u8mf4(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf2( @@ -75,7 +75,7 @@ void test_vsseg4e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsseg4e8_v_u8mf2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_u8mf2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8m1( @@ -84,7 +84,7 @@ void test_vsseg4e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsseg4e8_v_u8m1(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_u8m1(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8m2( @@ -93,7 +93,7 @@ void test_vsseg4e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_ // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsseg4e8_v_u8m2(base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_u8m2(base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf8_m( @@ -102,7 +102,7 @@ void test_vsseg4e8_v_u8m2(uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_ // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsseg4e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf4_m( @@ -111,7 +111,7 @@ void test_vsseg4e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, 
vint8mf4_t v3, size_t vl) { - return vsseg4e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8mf2_m( @@ -120,7 +120,7 @@ void test_vsseg4e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsseg4e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8m1_m( @@ -129,7 +129,7 @@ void test_vsseg4e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsseg4e8_v_i8m1_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_i8m1_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_i8m2_m( @@ -138,7 +138,7 @@ void test_vsseg4e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsseg4e8_v_i8m2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_i8m2_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf8_m( @@ -147,7 +147,7 @@ void test_vsseg4e8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t v0, vint8m2_t // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsseg4e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf4_m( @@ -156,7 +156,7 @@ void 
test_vsseg4e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsseg4e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8mf2_m( @@ -165,7 +165,7 @@ void test_vsseg4e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsseg4e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8m1_m( @@ -174,7 +174,7 @@ void test_vsseg4e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsseg4e8_v_u8m1_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_u8m1_m(mask, base, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsseg4e8_v_u8m2_m( @@ -183,6 +183,6 @@ void test_vsseg4e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m // CHECK-RV64-NEXT: ret void // void test_vsseg4e8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsseg4e8_v_u8m2_m(mask, base, v0, v1, v2, v3, vl); + return __riscv_vsseg4e8_v_u8m2_m(mask, base, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e16.c index 263e81372aac..c4fe14623caa 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsseg5e16_v_f16mf4(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_f16mf4(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsseg5e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsseg5e16_v_f16mf2(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_f16mf2(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsseg5e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsseg5e16_v_f16m1(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_f16m1(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_i16mf4( @@ -40,7 +40,7 @@ void test_vsseg5e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsseg5e16_v_i16mf4(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_i16mf4(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_i16mf2( @@ -49,7 +49,7 @@ void test_vsseg5e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t 
v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsseg5e16_v_i16mf2(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_i16mf2(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_i16m1( @@ -58,7 +58,7 @@ void test_vsseg5e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsseg5e16_v_i16m1(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_i16m1(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_u16mf4( @@ -67,7 +67,7 @@ void test_vsseg5e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsseg5e16_v_u16mf4(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_u16mf4(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_u16mf2( @@ -76,7 +76,7 @@ void test_vsseg5e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsseg5e16_v_u16mf2(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_u16mf2(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_u16m1( @@ -85,7 +85,7 @@ void test_vsseg5e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsseg5e16_v_u16m1(base, v0, v1, v2, 
v3, v4, vl); + return __riscv_vsseg5e16_v_u16m1(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4_m( @@ -94,7 +94,7 @@ void test_vsseg5e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsseg5e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2_m( @@ -103,7 +103,7 @@ void test_vsseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsseg5e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1_m( @@ -112,7 +112,7 @@ void test_vsseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsseg5e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_i16mf4_m( @@ -121,7 +121,7 @@ void test_vsseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsseg5e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_i16mf4_m(mask, base, 
v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_i16mf2_m( @@ -130,7 +130,7 @@ void test_vsseg5e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsseg5e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_i16m1_m( @@ -139,7 +139,7 @@ void test_vsseg5e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsseg5e16_v_i16m1_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_i16m1_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_u16mf4_m( @@ -148,7 +148,7 @@ void test_vsseg5e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsseg5e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e16_v_u16mf2_m( @@ -157,7 +157,7 @@ void test_vsseg5e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsseg5e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: 
@test_vsseg5e16_v_u16m1_m( @@ -166,6 +166,6 @@ void test_vsseg5e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg5e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsseg5e16_v_u16m1_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e16_v_u16m1_m(mask, base, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e32.c index 8f3c05fdd46c..415efd8ce19f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg5e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsseg5e32_v_f32mf2(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e32_v_f32mf2(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e32_v_f32m1( @@ -22,7 +22,7 @@ void test_vsseg5e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg5e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsseg5e32_v_f32m1(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e32_v_f32m1(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e32_v_i32mf2( @@ -31,7 +31,7 @@ void test_vsseg5e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vsseg5e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return 
vsseg5e32_v_i32mf2(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e32_v_i32mf2(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e32_v_i32m1( @@ -40,7 +40,7 @@ void test_vsseg5e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg5e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsseg5e32_v_i32m1(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e32_v_i32m1(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e32_v_u32mf2( @@ -49,7 +49,7 @@ void test_vsseg5e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m // CHECK-RV64-NEXT: ret void // void test_vsseg5e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsseg5e32_v_u32mf2(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e32_v_u32mf2(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e32_v_u32m1( @@ -58,7 +58,7 @@ void test_vsseg5e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg5e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsseg5e32_v_u32m1(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e32_v_u32m1(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e32_v_f32mf2_m( @@ -67,7 +67,7 @@ void test_vsseg5e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg5e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsseg5e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e32_v_f32m1_m( @@ -76,7 +76,7 @@ void 
test_vsseg5e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vsseg5e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsseg5e32_v_f32m1_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e32_v_f32m1_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e32_v_i32mf2_m( @@ -85,7 +85,7 @@ void test_vsseg5e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg5e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsseg5e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e32_v_i32m1_m( @@ -94,7 +94,7 @@ void test_vsseg5e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg5e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsseg5e32_v_i32m1_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e32_v_i32m1_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e32_v_u32mf2_m( @@ -103,7 +103,7 @@ void test_vsseg5e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg5e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsseg5e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e32_v_u32m1_m( @@ -112,6 +112,6 @@ void test_vsseg5e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, // 
CHECK-RV64-NEXT: ret void // void test_vsseg5e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsseg5e32_v_u32m1_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e32_v_u32m1_m(mask, base, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e64.c index d9583b462bba..757e490efdd4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg5e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsseg5e64_v_f64m1(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e64_v_f64m1(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e64_v_i64m1( @@ -22,7 +22,7 @@ void test_vsseg5e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg5e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsseg5e64_v_i64m1(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e64_v_i64m1(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e64_v_u64m1( @@ -31,7 +31,7 @@ void test_vsseg5e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m // CHECK-RV64-NEXT: ret void // void test_vsseg5e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsseg5e64_v_u64m1(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e64_v_u64m1(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e64_v_f64m1_m( 
@@ -40,7 +40,7 @@ void test_vsseg5e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg5e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsseg5e64_v_f64m1_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e64_v_f64m1_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e64_v_i64m1_m( @@ -49,7 +49,7 @@ void test_vsseg5e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl // CHECK-RV64-NEXT: ret void // void test_vsseg5e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsseg5e64_v_i64m1_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e64_v_i64m1_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e64_v_u64m1_m( @@ -58,6 +58,6 @@ void test_vsseg5e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg5e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsseg5e64_v_u64m1_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e64_v_u64m1_m(mask, base, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e8.c index fb7aa57bf4cb..97dc397876d7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg5e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return 
vsseg5e8_v_i8mf8(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_i8mf8(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vsseg5e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsseg5e8_v_i8mf4(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_i8mf4(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vsseg5e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsseg5e8_v_i8mf2(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_i8mf2(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vsseg5e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsseg5e8_v_i8m1(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_i8m1(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf8( @@ -48,7 +48,7 @@ void test_vsseg5e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2 // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsseg5e8_v_u8mf8(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_u8mf8(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf4( @@ -57,7 +57,7 @@ void test_vsseg5e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8 // CHECK-RV64-NEXT: ret 
void // void test_vsseg5e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsseg5e8_v_u8mf4(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_u8mf4(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf2( @@ -66,7 +66,7 @@ void test_vsseg5e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsseg5e8_v_u8mf2(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_u8mf2(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8m1( @@ -75,7 +75,7 @@ void test_vsseg5e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsseg5e8_v_u8m1(base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_u8m1(base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf8_m( @@ -84,7 +84,7 @@ void test_vsseg5e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_ // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsseg5e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf4_m( @@ -93,7 +93,7 @@ void test_vsseg5e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsseg5e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, v4, vl); + return 
__riscv_vsseg5e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8mf2_m( @@ -102,7 +102,7 @@ void test_vsseg5e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsseg5e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_i8m1_m( @@ -111,7 +111,7 @@ void test_vsseg5e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsseg5e8_v_i8m1_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_i8m1_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf8_m( @@ -120,7 +120,7 @@ void test_vsseg5e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsseg5e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf4_m( @@ -129,7 +129,7 @@ void test_vsseg5e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsseg5e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8mf2_m( @@ -138,7 +138,7 
@@ void test_vsseg5e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsseg5e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsseg5e8_v_u8m1_m( @@ -147,6 +147,6 @@ void test_vsseg5e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg5e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsseg5e8_v_u8m1_m(mask, base, v0, v1, v2, v3, v4, vl); + return __riscv_vsseg5e8_v_u8m1_m(mask, base, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e16.c index 60c012cbea82..ccf9c59f88c4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsseg6e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsseg6e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t 
vl) { - return vsseg6e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsseg6e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsseg6e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_i16mf4( @@ -40,7 +40,7 @@ void test_vsseg6e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsseg6e16_v_i16mf4(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_i16mf4(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_i16mf2( @@ -49,7 +49,7 @@ void test_vsseg6e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsseg6e16_v_i16mf2(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_i16mf2(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_i16m1( @@ -58,7 +58,7 @@ void test_vsseg6e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsseg6e16_v_i16m1(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_i16m1(base, v0, v1, v2, v3, v4, v5, 
vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_u16mf4( @@ -67,7 +67,7 @@ void test_vsseg6e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsseg6e16_v_u16mf4(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_u16mf4(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_u16mf2( @@ -76,7 +76,7 @@ void test_vsseg6e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsseg6e16_v_u16mf2(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_u16mf2(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_u16m1( @@ -85,7 +85,7 @@ void test_vsseg6e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsseg6e16_v_u16m1(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_u16m1(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4_m( @@ -94,7 +94,7 @@ void test_vsseg6e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsseg6e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2_m( @@ -103,7 
+103,7 @@ void test_vsseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsseg6e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1_m( @@ -112,7 +112,7 @@ void test_vsseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsseg6e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_i16mf4_m( @@ -121,7 +121,7 @@ void test_vsseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsseg6e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_i16mf2_m( @@ -130,7 +130,7 @@ void test_vsseg6e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsseg6e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: 
@test_vsseg6e16_v_i16m1_m( @@ -139,7 +139,7 @@ void test_vsseg6e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsseg6e16_v_i16m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_i16m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_u16mf4_m( @@ -148,7 +148,7 @@ void test_vsseg6e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsseg6e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_u16mf2_m( @@ -157,7 +157,7 @@ void test_vsseg6e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsseg6e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e16_v_u16m1_m( @@ -166,6 +166,6 @@ void test_vsseg6e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg6e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsseg6e16_v_u16m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e16_v_u16m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e32.c index 421b3b89f363..57d33d224637 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg6e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsseg6e32_v_f32mf2(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e32_v_f32mf2(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e32_v_f32m1( @@ -22,7 +22,7 @@ void test_vsseg6e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg6e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsseg6e32_v_f32m1(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e32_v_f32m1(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e32_v_i32mf2( @@ -31,7 +31,7 @@ void test_vsseg6e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vsseg6e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsseg6e32_v_i32mf2(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e32_v_i32mf2(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e32_v_i32m1( @@ -40,7 +40,7 @@ void test_vsseg6e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg6e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, 
vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsseg6e32_v_i32m1(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e32_v_i32m1(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e32_v_u32mf2( @@ -49,7 +49,7 @@ void test_vsseg6e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m // CHECK-RV64-NEXT: ret void // void test_vsseg6e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsseg6e32_v_u32mf2(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e32_v_u32mf2(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e32_v_u32m1( @@ -58,7 +58,7 @@ void test_vsseg6e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg6e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsseg6e32_v_u32m1(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e32_v_u32m1(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e32_v_f32mf2_m( @@ -67,7 +67,7 @@ void test_vsseg6e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg6e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsseg6e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e32_v_f32m1_m( @@ -76,7 +76,7 @@ void test_vsseg6e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vsseg6e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return 
vsseg6e32_v_f32m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e32_v_f32m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e32_v_i32mf2_m( @@ -85,7 +85,7 @@ void test_vsseg6e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg6e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsseg6e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e32_v_i32m1_m( @@ -94,7 +94,7 @@ void test_vsseg6e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg6e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsseg6e32_v_i32m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e32_v_i32m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e32_v_u32mf2_m( @@ -103,7 +103,7 @@ void test_vsseg6e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg6e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsseg6e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e32_v_u32m1_m( @@ -112,6 +112,6 @@ void test_vsseg6e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg6e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { 
- return vsseg6e32_v_u32m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e32_v_u32m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e64.c index d6b5c15c8515..a54fac861e31 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg6e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsseg6e64_v_f64m1(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e64_v_f64m1(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e64_v_i64m1( @@ -22,7 +22,7 @@ void test_vsseg6e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg6e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsseg6e64_v_i64m1(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e64_v_i64m1(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e64_v_u64m1( @@ -31,7 +31,7 @@ void test_vsseg6e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m // CHECK-RV64-NEXT: ret void // void test_vsseg6e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsseg6e64_v_u64m1(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e64_v_u64m1(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e64_v_f64m1_m( @@ -40,7 +40,7 @@ void test_vsseg6e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuin // 
CHECK-RV64-NEXT: ret void // void test_vsseg6e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsseg6e64_v_f64m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e64_v_f64m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e64_v_i64m1_m( @@ -49,7 +49,7 @@ void test_vsseg6e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl // CHECK-RV64-NEXT: ret void // void test_vsseg6e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsseg6e64_v_i64m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e64_v_i64m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e64_v_u64m1_m( @@ -58,6 +58,6 @@ void test_vsseg6e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg6e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsseg6e64_v_u64m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e64_v_u64m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e8.c index ba9b3415cc9d..d04515da3282 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg6e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return 
vsseg6e8_v_i8mf8(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_i8mf8(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vsseg6e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsseg6e8_v_i8mf4(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_i8mf4(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vsseg6e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsseg6e8_v_i8mf2(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_i8mf2(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vsseg6e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsseg6e8_v_i8m1(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_i8m1(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf8( @@ -48,7 +48,7 @@ void test_vsseg6e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2 // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsseg6e8_v_u8mf8(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_u8mf8(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf4( @@ -57,7 +57,7 @@ void 
test_vsseg6e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsseg6e8_v_u8mf4(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_u8mf4(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf2( @@ -66,7 +66,7 @@ void test_vsseg6e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsseg6e8_v_u8mf2(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_u8mf2(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8m1( @@ -75,7 +75,7 @@ void test_vsseg6e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsseg6e8_v_u8m1(base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_u8m1(base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf8_m( @@ -84,7 +84,7 @@ void test_vsseg6e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_ // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsseg6e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf4_m( @@ -93,7 +93,7 @@ void test_vsseg6e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m // CHECK-RV64-NEXT: ret void // void 
test_vsseg6e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsseg6e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8mf2_m( @@ -102,7 +102,7 @@ void test_vsseg6e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsseg6e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_i8m1_m( @@ -111,7 +111,7 @@ void test_vsseg6e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsseg6e8_v_i8m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_i8m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf8_m( @@ -120,7 +120,7 @@ void test_vsseg6e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsseg6e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf4_m( @@ -129,7 +129,7 @@ void test_vsseg6e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_u8mf4_m(vbool32_t mask, 
uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsseg6e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8mf2_m( @@ -138,7 +138,7 @@ void test_vsseg6e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsseg6e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsseg6e8_v_u8m1_m( @@ -147,6 +147,6 @@ void test_vsseg6e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg6e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsseg6e8_v_u8m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsseg6e8_v_u8m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e16.c index d2d8d1208f66..fd5cd805fe58 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsseg7e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, v6, vl); + return 
__riscv_vsseg7e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsseg7e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsseg7e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsseg7e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsseg7e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_i16mf4( @@ -40,7 +40,7 @@ void test_vsseg7e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsseg7e16_v_i16mf4(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_i16mf4(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_i16mf2( @@ -49,7 +49,7 @@ void test_vsseg7e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsseg7e16_v_i16mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); + return 
__riscv_vsseg7e16_v_i16mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_i16m1( @@ -58,7 +58,7 @@ void test_vsseg7e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsseg7e16_v_i16m1(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_i16m1(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_u16mf4( @@ -67,7 +67,7 @@ void test_vsseg7e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsseg7e16_v_u16mf4(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_u16mf4(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_u16mf2( @@ -76,7 +76,7 @@ void test_vsseg7e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsseg7e16_v_u16mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_u16mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_u16m1( @@ -85,7 +85,7 @@ void test_vsseg7e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsseg7e16_v_u16m1(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_u16m1(base, 
v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4_m( @@ -94,7 +94,7 @@ void test_vsseg7e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsseg7e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2_m( @@ -103,7 +103,7 @@ void test_vsseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsseg7e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1_m( @@ -112,7 +112,7 @@ void test_vsseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsseg7e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_i16mf4_m( @@ -121,7 +121,7 @@ void test_vsseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, 
vint16mf4_t v6, size_t vl) { - return vsseg7e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_i16mf2_m( @@ -130,7 +130,7 @@ void test_vsseg7e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsseg7e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_i16m1_m( @@ -139,7 +139,7 @@ void test_vsseg7e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsseg7e16_v_i16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_i16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_u16mf4_m( @@ -148,7 +148,7 @@ void test_vsseg7e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsseg7e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_u16mf2_m( @@ -157,7 +157,7 @@ void test_vsseg7e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_u16mf2_m(vbool32_t 
mask, uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsseg7e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e16_v_u16m1_m( @@ -166,6 +166,6 @@ void test_vsseg7e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg7e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsseg7e16_v_u16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e16_v_u16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e32.c index af7943265ad2..4e6183eb2512 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg7e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsseg7e32_v_f32mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e32_v_f32mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e32_v_f32m1( @@ -22,7 +22,7 @@ void test_vsseg7e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg7e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { 
- return vsseg7e32_v_f32m1(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e32_v_f32m1(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e32_v_i32mf2( @@ -31,7 +31,7 @@ void test_vsseg7e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vsseg7e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsseg7e32_v_i32mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e32_v_i32mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e32_v_i32m1( @@ -40,7 +40,7 @@ void test_vsseg7e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg7e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsseg7e32_v_i32m1(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e32_v_i32m1(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e32_v_u32mf2( @@ -49,7 +49,7 @@ void test_vsseg7e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m // CHECK-RV64-NEXT: ret void // void test_vsseg7e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsseg7e32_v_u32mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e32_v_u32mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e32_v_u32m1( @@ -58,7 +58,7 @@ void test_vsseg7e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg7e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsseg7e32_v_u32m1(base, v0, v1, 
v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e32_v_u32m1(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e32_v_f32mf2_m( @@ -67,7 +67,7 @@ void test_vsseg7e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg7e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsseg7e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e32_v_f32m1_m( @@ -76,7 +76,7 @@ void test_vsseg7e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vsseg7e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsseg7e32_v_f32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e32_v_f32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e32_v_i32mf2_m( @@ -85,7 +85,7 @@ void test_vsseg7e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg7e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsseg7e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e32_v_i32m1_m( @@ -94,7 +94,7 @@ void test_vsseg7e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg7e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, 
vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsseg7e32_v_i32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e32_v_i32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e32_v_u32mf2_m( @@ -103,7 +103,7 @@ void test_vsseg7e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg7e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsseg7e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e32_v_u32m1_m( @@ -112,6 +112,6 @@ void test_vsseg7e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg7e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsseg7e32_v_u32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e32_v_u32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e64.c index b482b7471253..db60857eb64f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg7e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsseg7e64_v_f64m1(base, v0, v1, v2, v3, v4, v5, v6, 
vl); + return __riscv_vsseg7e64_v_f64m1(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e64_v_i64m1( @@ -22,7 +22,7 @@ void test_vsseg7e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg7e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsseg7e64_v_i64m1(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e64_v_i64m1(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e64_v_u64m1( @@ -31,7 +31,7 @@ void test_vsseg7e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m // CHECK-RV64-NEXT: ret void // void test_vsseg7e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsseg7e64_v_u64m1(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e64_v_u64m1(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e64_v_f64m1_m( @@ -40,7 +40,7 @@ void test_vsseg7e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg7e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsseg7e64_v_f64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e64_v_f64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e64_v_i64m1_m( @@ -49,7 +49,7 @@ void test_vsseg7e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl // CHECK-RV64-NEXT: ret void // void test_vsseg7e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsseg7e64_v_i64m1_m(mask, base, v0, v1, v2, v3, v4, 
v5, v6, vl); + return __riscv_vsseg7e64_v_i64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e64_v_u64m1_m( @@ -58,6 +58,6 @@ void test_vsseg7e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg7e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsseg7e64_v_u64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e64_v_u64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e8.c index 06a9af5e21da..f1f4320791dc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg7e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsseg7e8_v_i8mf8(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_i8mf8(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vsseg7e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsseg7e8_v_i8mf4(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_i8mf4(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vsseg7e8_v_i8mf4(int8_t *base, vint8mf4_t v0, 
vint8mf4_t v1, vint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsseg7e8_v_i8mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_i8mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vsseg7e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsseg7e8_v_i8m1(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_i8m1(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf8( @@ -48,7 +48,7 @@ void test_vsseg7e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2 // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsseg7e8_v_u8mf8(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_u8mf8(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf4( @@ -57,7 +57,7 @@ void test_vsseg7e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsseg7e8_v_u8mf4(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_u8mf4(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf2( @@ -66,7 +66,7 @@ void test_vsseg7e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_u8mf2(uint8_t 
*base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsseg7e8_v_u8mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_u8mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8m1( @@ -75,7 +75,7 @@ void test_vsseg7e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsseg7e8_v_u8m1(base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_u8m1(base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf8_m( @@ -84,7 +84,7 @@ void test_vsseg7e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_ // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsseg7e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf4_m( @@ -93,7 +93,7 @@ void test_vsseg7e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsseg7e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8mf2_m( @@ -102,7 +102,7 @@ void test_vsseg7e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_i8mf2_m(vbool16_t mask, int8_t 
*base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsseg7e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_i8m1_m( @@ -111,7 +111,7 @@ void test_vsseg7e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsseg7e8_v_i8m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_i8m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf8_m( @@ -120,7 +120,7 @@ void test_vsseg7e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsseg7e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf4_m( @@ -129,7 +129,7 @@ void test_vsseg7e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsseg7e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8mf2_m( @@ -138,7 +138,7 @@ void test_vsseg7e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin // CHECK-RV64-NEXT: ret 
void // void test_vsseg7e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsseg7e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsseg7e8_v_u8m1_m( @@ -147,6 +147,6 @@ void test_vsseg7e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg7e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsseg7e8_v_u8m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsseg7e8_v_u8m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e16.c index 006215fc42c6..4cdf2333c794 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsseg8e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsseg8e16_v_f16mf4(_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, 
vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsseg8e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsseg8e16_v_f16mf2(_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsseg8e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_i16mf4( @@ -40,7 +40,7 @@ void test_vsseg8e16_v_f16m1(_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsseg8e16_v_i16mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_i16mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_i16mf2( @@ -49,7 +49,7 @@ void test_vsseg8e16_v_i16mf4(int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsseg8e16_v_i16mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_i16mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_i16m1( @@ -58,7 +58,7 @@ void test_vsseg8e16_v_i16mf2(int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_i16m1(int16_t 
*base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsseg8e16_v_i16m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_i16m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_u16mf4( @@ -67,7 +67,7 @@ void test_vsseg8e16_v_i16m1(int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsseg8e16_v_u16mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_u16mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_u16mf2( @@ -76,7 +76,7 @@ void test_vsseg8e16_v_u16mf4(uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsseg8e16_v_u16mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_u16mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_u16m1( @@ -85,7 +85,7 @@ void test_vsseg8e16_v_u16mf2(uint16_t *base, vuint16mf2_t v0, vuint16mf2_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsseg8e16_v_u16m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_u16m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf4_m( @@ -94,7 +94,7 @@ void test_vsseg8e16_v_u16m1(uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuin 
// CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsseg8e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2_m( @@ -103,7 +103,7 @@ void test_vsseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsseg8e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1_m( @@ -112,7 +112,7 @@ void test_vsseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsseg8e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_i16mf4_m( @@ -121,7 +121,7 @@ void test_vsseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsseg8e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, 
v6, v7, vl); + return __riscv_vsseg8e16_v_i16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_i16mf2_m( @@ -130,7 +130,7 @@ void test_vsseg8e16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsseg8e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_i16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_i16m1_m( @@ -139,7 +139,7 @@ void test_vsseg8e16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsseg8e16_v_i16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_i16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_u16mf4_m( @@ -148,7 +148,7 @@ void test_vsseg8e16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsseg8e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_u16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_u16mf2_m( @@ -157,7 +157,7 @@ void test_vsseg8e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_u16mf2_m(vbool32_t mask, uint16_t 
*base, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsseg8e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_u16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e16_v_u16m1_m( @@ -166,6 +166,6 @@ void test_vsseg8e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg8e16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsseg8e16_v_u16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e16_v_u16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e32.c index 40a24bff71ac..678cf567333a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg8e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsseg8e32_v_f32mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e32_v_f32mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e32_v_f32m1( @@ -22,7 +22,7 @@ void test_vsseg8e32_v_f32mf2(float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vf // CHECK-RV64-NEXT: ret void // void test_vsseg8e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, 
vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsseg8e32_v_f32m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e32_v_f32m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e32_v_i32mf2( @@ -31,7 +31,7 @@ void test_vsseg8e32_v_f32m1(float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vsseg8e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsseg8e32_v_i32mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e32_v_i32mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e32_v_i32m1( @@ -40,7 +40,7 @@ void test_vsseg8e32_v_i32mf2(int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint // CHECK-RV64-NEXT: ret void // void test_vsseg8e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsseg8e32_v_i32m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e32_v_i32m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e32_v_u32mf2( @@ -49,7 +49,7 @@ void test_vsseg8e32_v_i32m1(int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m // CHECK-RV64-NEXT: ret void // void test_vsseg8e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsseg8e32_v_u32mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e32_v_u32mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e32_v_u32m1( @@ -58,7 +58,7 @@ void test_vsseg8e32_v_u32mf2(uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, v // CHECK-RV64-NEXT: ret void // void test_vsseg8e32_v_u32m1(uint32_t *base, 
vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsseg8e32_v_u32m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e32_v_u32m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e32_v_f32mf2_m( @@ -67,7 +67,7 @@ void test_vsseg8e32_v_u32m1(uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg8e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsseg8e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e32_v_f32m1_m( @@ -76,7 +76,7 @@ void test_vsseg8e32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vsseg8e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsseg8e32_v_f32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e32_v_f32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e32_v_i32mf2_m( @@ -85,7 +85,7 @@ void test_vsseg8e32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t v0, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg8e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsseg8e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e32_v_i32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: 
@test_vsseg8e32_v_i32m1_m( @@ -94,7 +94,7 @@ void test_vsseg8e32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsseg8e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsseg8e32_v_i32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e32_v_i32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e32_v_u32mf2_m( @@ -103,7 +103,7 @@ void test_vsseg8e32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg8e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsseg8e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e32_v_u32mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e32_v_u32m1_m( @@ -112,6 +112,6 @@ void test_vsseg8e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsseg8e32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsseg8e32_v_u32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e32_v_u32m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e64.c index b16d98be4789..85a837a0c788 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e64.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg8e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsseg8e64_v_f64m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e64_v_f64m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e64_v_i64m1( @@ -22,7 +22,7 @@ void test_vsseg8e64_v_f64m1(double *base, vfloat64m1_t v0, vfloat64m1_t v1, vflo // CHECK-RV64-NEXT: ret void // void test_vsseg8e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsseg8e64_v_i64m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e64_v_i64m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e64_v_u64m1( @@ -31,7 +31,7 @@ void test_vsseg8e64_v_i64m1(int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m // CHECK-RV64-NEXT: ret void // void test_vsseg8e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsseg8e64_v_u64m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e64_v_u64m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e64_v_f64m1_m( @@ -40,7 +40,7 @@ void test_vsseg8e64_v_u64m1(uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg8e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsseg8e64_v_f64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return 
__riscv_vsseg8e64_v_f64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e64_v_i64m1_m( @@ -49,7 +49,7 @@ void test_vsseg8e64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t v0, vfl // CHECK-RV64-NEXT: ret void // void test_vsseg8e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsseg8e64_v_i64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e64_v_i64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e64_v_u64m1_m( @@ -58,6 +58,6 @@ void test_vsseg8e64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsseg8e64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsseg8e64_v_u64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e64_v_u64m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e8.c index 410a49cd96f5..9887306823ec 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsseg8e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsseg8e8_v_i8mf8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_i8mf8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: 
@test_vsseg8e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vsseg8e8_v_i8mf8(int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsseg8e8_v_i8mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_i8mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vsseg8e8_v_i8mf4(int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsseg8e8_v_i8mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_i8mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vsseg8e8_v_i8mf2(int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsseg8e8_v_i8m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_i8m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf8( @@ -48,7 +48,7 @@ void test_vsseg8e8_v_i8m1(int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2 // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsseg8e8_v_u8mf8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_u8mf8(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: 
@test_vsseg8e8_v_u8mf4( @@ -57,7 +57,7 @@ void test_vsseg8e8_v_u8mf8(uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsseg8e8_v_u8mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_u8mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf2( @@ -66,7 +66,7 @@ void test_vsseg8e8_v_u8mf4(uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsseg8e8_v_u8mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_u8mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8m1( @@ -75,7 +75,7 @@ void test_vsseg8e8_v_u8mf2(uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8 // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsseg8e8_v_u8m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_u8m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf8_m( @@ -84,7 +84,7 @@ void test_vsseg8e8_v_u8m1(uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_ // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsseg8e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_i8mf8_m(mask, base, v0, v1, v2, v3, 
v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf4_m( @@ -93,7 +93,7 @@ void test_vsseg8e8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsseg8e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_i8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8mf2_m( @@ -102,7 +102,7 @@ void test_vsseg8e8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsseg8e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_i8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e8_v_i8m1_m( @@ -111,7 +111,7 @@ void test_vsseg8e8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsseg8e8_v_i8m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_i8m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf8_m( @@ -120,7 +120,7 @@ void test_vsseg8e8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t v0, vint8m1_t // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - 
return vsseg8e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_u8mf8_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf4_m( @@ -129,7 +129,7 @@ void test_vsseg8e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsseg8e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_u8mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8mf2_m( @@ -138,7 +138,7 @@ void test_vsseg8e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsseg8e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_u8mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsseg8e8_v_u8m1_m( @@ -147,6 +147,6 @@ void test_vsseg8e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vsseg8e8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsseg8e8_v_u8m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsseg8e8_v_u8m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssra.c index 
5c7724641cf7..43e55e98793d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssra.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssra_vv_i8mf8(op1, shift, vl); + return __riscv_vssra_vv_i8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf8(op1, shift, vl); + return __riscv_vssra_vx_i8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssra_vv_i8mf4(op1, shift, vl); + return __riscv_vssra_vv_i8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf4(op1, shift, vl); + return __riscv_vssra_vx_i8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssra_vv_i8mf2(op1, shift, vl); + return __riscv_vssra_vv_i8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf2(op1, shift, vl); + return __riscv_vssra_vx_i8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssra_vv_i8m1(op1, shift, vl); + return __riscv_vssra_vv_i8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m1(op1, shift, vl); + return __riscv_vssra_vx_i8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssra_vv_i8m2(op1, shift, vl); + return __riscv_vssra_vv_i8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m2(op1, shift, vl); + return __riscv_vssra_vx_i8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssra_vv_i8m4(op1, shift, vl); + return __riscv_vssra_vv_i8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m4(op1, shift, vl); + return __riscv_vssra_vx_i8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssra_vv_i8m8(op1, shift, vl); + return __riscv_vssra_vv_i8m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m8(op1, shift, vl); + return __riscv_vssra_vx_i8m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssra_vv_i16mf4(op1, shift, vl); + return __riscv_vssra_vv_i16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { - return vssra_vx_i16mf4(op1, shift, vl); + return __riscv_vssra_vx_i16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssra_vv_i16mf2(op1, shift, vl); + return __riscv_vssra_vv_i16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t 
test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i16mf2(op1, shift, vl); + return __riscv_vssra_vx_i16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssra_vv_i16m1(op1, shift, vl); + return __riscv_vssra_vv_i16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m1(op1, shift, vl); + return __riscv_vssra_vx_i16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssra_vv_i16m2(op1, shift, vl); + return __riscv_vssra_vv_i16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m2(op1, shift, vl); + return __riscv_vssra_vx_i16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssra_vv_i16m4(op1, shift, vl); + return __riscv_vssra_vv_i16m4(op1, shift, vl); } 
// CHECK-RV64-LABEL: @test_vssra_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m4(op1, shift, vl); + return __riscv_vssra_vx_i16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssra_vv_i16m8(op1, shift, vl); + return __riscv_vssra_vv_i16m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m8(op1, shift, vl); + return __riscv_vssra_vx_i16m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssra_vv_i32mf2(op1, shift, vl); + return __riscv_vssra_vv_i32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i32mf2(op1, shift, vl); + return __riscv_vssra_vx_i32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return 
vssra_vv_i32m1(op1, shift, vl); + return __riscv_vssra_vv_i32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m1(op1, shift, vl); + return __riscv_vssra_vx_i32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssra_vv_i32m2(op1, shift, vl); + return __riscv_vssra_vv_i32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m2(op1, shift, vl); + return __riscv_vssra_vx_i32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssra_vv_i32m4(op1, shift, vl); + return __riscv_vssra_vv_i32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m4(op1, shift, vl); + return __riscv_vssra_vx_i32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssra_vv_i32m8(op1, shift, vl); + return __riscv_vssra_vv_i32m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m8(op1, shift, vl); + return __riscv_vssra_vx_i32m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssra_vv_i64m1(op1, shift, vl); + return __riscv_vssra_vv_i64m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m1(op1, shift, vl); + return __riscv_vssra_vx_i64m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssra_vv_i64m2(op1, shift, vl); + return __riscv_vssra_vv_i64m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m2(op1, shift, vl); + return __riscv_vssra_vx_i64m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t 
shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssra_vv_i64m4(op1, shift, vl); + return __riscv_vssra_vv_i64m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m4(op1, shift, vl); + return __riscv_vssra_vx_i64m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssra_vv_i64m8(op1, shift, vl); + return __riscv_vssra_vv_i64m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m8(op1, shift, vl); + return __riscv_vssra_vx_i64m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m( @@ -408,7 +408,7 @@ vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssra_vv_i8mf8_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf8_m(mask, op1, shift, vl); + return 
__riscv_vssra_vx_i8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m( @@ -426,7 +426,7 @@ vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssra_vv_i8mf4_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m( @@ -435,7 +435,7 @@ vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf4_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m( @@ -444,7 +444,7 @@ vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssra_vv_i8mf2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m( @@ -453,7 +453,7 @@ vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf2_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m( @@ -462,7 +462,7 @@ vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssra_vv_i8m1_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vssra_vx_i8m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m1_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m( @@ -480,7 +480,7 @@ vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssra_vv_i8m2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m( @@ -489,7 +489,7 @@ vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m2_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m( @@ -498,7 +498,7 @@ vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssra_vv_i8m4_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m( @@ -507,7 +507,7 @@ vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m4_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m( @@ -516,7 +516,7 @@ vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t 
shift, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssra_vv_i8m8_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m( @@ -525,7 +525,7 @@ vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m8_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m( @@ -534,7 +534,7 @@ vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssra_vv_i16mf4_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m( @@ -543,7 +543,7 @@ vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return vssra_vx_i16mf4_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m( @@ -552,7 +552,7 @@ vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssra_vv_i16mf2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m( @@ -561,7 +561,7 @@ vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i16mf2_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m( @@ -570,7 +570,7 @@ vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssra_vv_i16m1_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m( @@ -579,7 +579,7 @@ vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m1_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m( @@ -588,7 +588,7 @@ vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssra_vv_i16m2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m( @@ -597,7 +597,7 @@ vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m2_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m( @@ -606,7 +606,7 @@ vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, 
size_t vl) { - return vssra_vv_i16m4_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m( @@ -615,7 +615,7 @@ vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m4_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m( @@ -624,7 +624,7 @@ vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssra_vv_i16m8_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m( @@ -633,7 +633,7 @@ vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m8_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m( @@ -642,7 +642,7 @@ vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssra_vv_i32mf2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m( @@ -651,7 +651,7 @@ vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i32mf2_m(mask, op1, shift, vl); + return 
__riscv_vssra_vx_i32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m( @@ -660,7 +660,7 @@ vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssra_vv_i32m1_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m( @@ -669,7 +669,7 @@ vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m1_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m( @@ -678,7 +678,7 @@ vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssra_vv_i32m2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m( @@ -687,7 +687,7 @@ vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m2_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m( @@ -696,7 +696,7 @@ vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssra_vv_i32m4_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vssra_vx_i32m4_m( @@ -705,7 +705,7 @@ vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m4_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m( @@ -714,7 +714,7 @@ vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssra_vv_i32m8_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m( @@ -723,7 +723,7 @@ vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m8_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m( @@ -732,7 +732,7 @@ vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssra_vv_i64m1_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m( @@ -741,7 +741,7 @@ vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m1_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m( @@ -750,7 +750,7 @@ vint64m1_t 
test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssra_vv_i64m2_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m( @@ -759,7 +759,7 @@ vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m2_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m( @@ -768,7 +768,7 @@ vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssra_vv_i64m4_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m( @@ -777,7 +777,7 @@ vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m4_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m( @@ -786,7 +786,7 @@ vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssra_vv_i64m8_m(mask, op1, shift, vl); + return __riscv_vssra_vv_i64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m( @@ -795,6 +795,6 @@ vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shif // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m8_m(mask, op1, shift, vl); + return __riscv_vssra_vx_i64m8_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssrl.c index 3ea9c30c86d1..9ead80d30da0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssrl.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssrl_vv_u8mf8(op1, shift, vl); + return __riscv_vssrl_vv_u8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf8(op1, shift, vl); + return __riscv_vssrl_vx_u8mf8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssrl_vv_u8mf4(op1, shift, vl); + return __riscv_vssrl_vv_u8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4( @@ -39,7 +39,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf4(op1, shift, vl); + return __riscv_vssrl_vx_u8mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2( @@ -48,7 +48,7 @@ vuint8mf4_t 
test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssrl_vv_u8mf2(op1, shift, vl); + return __riscv_vssrl_vv_u8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf2(op1, shift, vl); + return __riscv_vssrl_vx_u8mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssrl_vv_u8m1(op1, shift, vl); + return __riscv_vssrl_vv_u8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m1(op1, shift, vl); + return __riscv_vssrl_vx_u8m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2( @@ -84,7 +84,7 @@ vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssrl_vv_u8m2(op1, shift, vl); + return __riscv_vssrl_vv_u8m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m2(op1, shift, vl); + return __riscv_vssrl_vx_u8m2(op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vssrl_vv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssrl_vv_u8m4(op1, shift, vl); + return __riscv_vssrl_vv_u8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4( @@ -111,7 +111,7 @@ vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m4(op1, shift, vl); + return __riscv_vssrl_vx_u8m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8( @@ -120,7 +120,7 @@ vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssrl_vv_u8m8(op1, shift, vl); + return __riscv_vssrl_vv_u8m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8( @@ -129,7 +129,7 @@ vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m8(op1, shift, vl); + return __riscv_vssrl_vx_u8m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4( @@ -138,7 +138,7 @@ vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssrl_vv_u16mf4(op1, shift, vl); + return __riscv_vssrl_vv_u16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4( @@ -147,7 +147,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf4(op1, shift, vl); + return 
__riscv_vssrl_vx_u16mf4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2( @@ -156,7 +156,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssrl_vv_u16mf2(op1, shift, vl); + return __riscv_vssrl_vv_u16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2( @@ -165,7 +165,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf2(op1, shift, vl); + return __riscv_vssrl_vx_u16mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1( @@ -174,7 +174,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssrl_vv_u16m1(op1, shift, vl); + return __riscv_vssrl_vv_u16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1( @@ -183,7 +183,7 @@ vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m1(op1, shift, vl); + return __riscv_vssrl_vx_u16m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2( @@ -192,7 +192,7 @@ vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssrl_vv_u16m2(op1, shift, vl); + return __riscv_vssrl_vv_u16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2( @@ -201,7 +201,7 @@ vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m2(op1, shift, vl); + return __riscv_vssrl_vx_u16m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4( @@ -210,7 +210,7 @@ vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssrl_vv_u16m4(op1, shift, vl); + return __riscv_vssrl_vv_u16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4( @@ -219,7 +219,7 @@ vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m4(op1, shift, vl); + return __riscv_vssrl_vx_u16m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8( @@ -228,7 +228,7 @@ vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssrl_vv_u16m8(op1, shift, vl); + return __riscv_vssrl_vv_u16m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8( @@ -237,7 +237,7 @@ vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m8(op1, shift, vl); + return __riscv_vssrl_vx_u16m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2( @@ -246,7 +246,7 @@ vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssrl_vv_u32mf2(op1, shift, vl); + return __riscv_vssrl_vv_u32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t 
test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32mf2(op1, shift, vl); + return __riscv_vssrl_vx_u32mf2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssrl_vv_u32m1(op1, shift, vl); + return __riscv_vssrl_vv_u32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m1(op1, shift, vl); + return __riscv_vssrl_vx_u32m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2( @@ -282,7 +282,7 @@ vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssrl_vv_u32m2(op1, shift, vl); + return __riscv_vssrl_vv_u32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2( @@ -291,7 +291,7 @@ vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m2(op1, shift, vl); + return __riscv_vssrl_vx_u32m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4( @@ -300,7 +300,7 @@ vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssrl_vv_u32m4(op1, shift, vl); + return 
__riscv_vssrl_vv_u32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4( @@ -309,7 +309,7 @@ vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m4(op1, shift, vl); + return __riscv_vssrl_vx_u32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8( @@ -318,7 +318,7 @@ vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssrl_vv_u32m8(op1, shift, vl); + return __riscv_vssrl_vv_u32m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8( @@ -327,7 +327,7 @@ vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m8(op1, shift, vl); + return __riscv_vssrl_vx_u32m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1( @@ -336,7 +336,7 @@ vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssrl_vv_u64m1(op1, shift, vl); + return __riscv_vssrl_vv_u64m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1( @@ -345,7 +345,7 @@ vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m1(op1, shift, vl); + return __riscv_vssrl_vx_u64m1(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2( @@ -354,7 +354,7 @@ vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssrl_vv_u64m2(op1, shift, vl); + return __riscv_vssrl_vv_u64m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2( @@ -363,7 +363,7 @@ vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m2(op1, shift, vl); + return __riscv_vssrl_vx_u64m2(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4( @@ -372,7 +372,7 @@ vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssrl_vv_u64m4(op1, shift, vl); + return __riscv_vssrl_vv_u64m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4( @@ -381,7 +381,7 @@ vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m4(op1, shift, vl); + return __riscv_vssrl_vx_u64m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8( @@ -390,7 +390,7 @@ vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssrl_vv_u64m8(op1, shift, vl); + return __riscv_vssrl_vv_u64m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m8(op1, shift, vl); + return __riscv_vssrl_vx_u64m8(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t 
test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssrl_vv_u8mf8_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m( @@ -417,7 +417,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf8_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8mf8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m( @@ -426,7 +426,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssrl_vv_u8mf4_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m( @@ -435,7 +435,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf4_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m( @@ -444,7 +444,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssrl_vv_u8mf2_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m( @@ -453,7 +453,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t s 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m( @@ -462,7 +462,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssrl_vv_u8m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m( @@ -471,7 +471,7 @@ vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m( @@ -480,7 +480,7 @@ vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssrl_vv_u8m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m( @@ -489,7 +489,7 @@ vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m( @@ -498,7 +498,7 @@ vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, 
vuint8m4_t shift, size_t vl) { - return vssrl_vv_u8m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m( @@ -507,7 +507,7 @@ vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m( @@ -516,7 +516,7 @@ vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssrl_vv_u8m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m( @@ -525,7 +525,7 @@ vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u8m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m( @@ -534,7 +534,7 @@ vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssrl_vv_u16mf4_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m( @@ -543,7 +543,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf4_m(mask, op1, shift, vl); + 
return __riscv_vssrl_vx_u16mf4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m( @@ -552,7 +552,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssrl_vv_u16mf2_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m( @@ -561,7 +561,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u16mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m( @@ -570,7 +570,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssrl_vv_u16m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m( @@ -579,7 +579,7 @@ vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u16m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m( @@ -588,7 +588,7 @@ vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssrl_vv_u16m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u16m2_m(mask, op1, shift, 
vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m( @@ -597,7 +597,7 @@ vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u16m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m( @@ -606,7 +606,7 @@ vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssrl_vv_u16m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m( @@ -615,7 +615,7 @@ vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u16m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m( @@ -624,7 +624,7 @@ vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssrl_vv_u16m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m( @@ -633,7 +633,7 @@ vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u16m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m( @@ -642,7 +642,7 @@ 
vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssrl_vv_u32mf2_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m( @@ -651,7 +651,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32mf2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u32mf2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m( @@ -660,7 +660,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssrl_vv_u32m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m( @@ -669,7 +669,7 @@ vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u32m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m( @@ -678,7 +678,7 @@ vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssrl_vv_u32m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m( @@ -687,7 +687,7 @@ vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, 
vuint32m2_t op1, vuint32m2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u32m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m( @@ -696,7 +696,7 @@ vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssrl_vv_u32m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m( @@ -705,7 +705,7 @@ vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u32m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m( @@ -714,7 +714,7 @@ vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssrl_vv_u32m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m( @@ -723,7 +723,7 @@ vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u32m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m( @@ -732,7 +732,7 @@ vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssrl_vv_u64m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m( @@ -741,7 +741,7 @@ vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m1_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u64m1_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m( @@ -750,7 +750,7 @@ vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssrl_vv_u64m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m( @@ -759,7 +759,7 @@ vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m2_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u64m2_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m( @@ -768,7 +768,7 @@ vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssrl_vv_u64m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m( @@ -777,7 +777,7 @@ vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t 
op1, size_t shift, size_t vl) { - return vssrl_vx_u64m4_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u64m4_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m( @@ -786,7 +786,7 @@ vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssrl_vv_u64m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vv_u64m8_m(mask, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m8_m(mask, op1, shift, vl); + return __riscv_vssrl_vx_u64m8_m(mask, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e16.c index cc4637d9fcb0..849db6cf4b63 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vssseg2e16_v_f16mf4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_f16mf4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vssseg2e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vssseg2e16_v_f16mf2(base, bstride, v0, v1, vl); + return 
__riscv_vssseg2e16_v_f16mf2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vssseg2e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vssseg2e16_v_f16m1(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_f16m1(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2( @@ -40,7 +40,7 @@ void test_vssseg2e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vssseg2e16_v_f16m2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_f16m2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m4( @@ -49,7 +49,7 @@ void test_vssseg2e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vssseg2e16_v_f16m4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_f16m4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_i16mf4( @@ -58,7 +58,7 @@ void test_vssseg2e16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vssseg2e16_v_i16mf4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_i16mf4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_i16mf2( @@ -67,7 +67,7 @@ void test_vssseg2e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return 
vssseg2e16_v_i16mf2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_i16mf2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_i16m1( @@ -76,7 +76,7 @@ void test_vssseg2e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vssseg2e16_v_i16m1(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_i16m1(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_i16m2( @@ -85,7 +85,7 @@ void test_vssseg2e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vssseg2e16_v_i16m2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_i16m2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_i16m4( @@ -94,7 +94,7 @@ void test_vssseg2e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vssseg2e16_v_i16m4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_i16m4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_u16mf4( @@ -103,7 +103,7 @@ void test_vssseg2e16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vssseg2e16_v_u16mf4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_u16mf4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_u16mf2( @@ -112,7 +112,7 @@ void test_vssseg2e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, 
vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vssseg2e16_v_u16mf2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_u16mf2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_u16m1( @@ -121,7 +121,7 @@ void test_vssseg2e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vssseg2e16_v_u16m1(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_u16m1(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_u16m2( @@ -130,7 +130,7 @@ void test_vssseg2e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vssseg2e16_v_u16m2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_u16m2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_u16m4( @@ -139,7 +139,7 @@ void test_vssseg2e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vssseg2e16_v_u16m4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_u16m4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf4_m( @@ -148,7 +148,7 @@ void test_vssseg2e16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vssseg2e16_v_f16mf4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_f16mf4_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2_m( @@ -157,7 +157,7 @@ void test_vssseg2e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t 
bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vssseg2e16_v_f16mf2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_f16mf2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1_m( @@ -166,7 +166,7 @@ void test_vssseg2e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vssseg2e16_v_f16m1_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_f16m1_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2_m( @@ -175,7 +175,7 @@ void test_vssseg2e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vssseg2e16_v_f16m2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_f16m2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m4_m( @@ -184,7 +184,7 @@ void test_vssseg2e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vssseg2e16_v_f16m4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_f16m4_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_i16mf4_m( @@ -193,7 +193,7 @@ void test_vssseg2e16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return 
vssseg2e16_v_i16mf4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_i16mf4_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_i16mf2_m( @@ -202,7 +202,7 @@ void test_vssseg2e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vssseg2e16_v_i16mf2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_i16mf2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_i16m1_m( @@ -211,7 +211,7 @@ void test_vssseg2e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vssseg2e16_v_i16m1_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_i16m1_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_i16m2_m( @@ -220,7 +220,7 @@ void test_vssseg2e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vssseg2e16_v_i16m2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_i16m2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_i16m4_m( @@ -229,7 +229,7 @@ void test_vssseg2e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vssseg2e16_v_i16m4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_i16m4_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_u16mf4_m( @@ -238,7 +238,7 @@ void 
test_vssseg2e16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vssseg2e16_v_u16mf4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_u16mf4_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_u16mf2_m( @@ -247,7 +247,7 @@ void test_vssseg2e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vssseg2e16_v_u16mf2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_u16mf2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_u16m1_m( @@ -256,7 +256,7 @@ void test_vssseg2e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vssseg2e16_v_u16m1_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_u16m1_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_u16m2_m( @@ -265,7 +265,7 @@ void test_vssseg2e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vssseg2e16_v_u16m2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_u16m2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e16_v_u16m4_m( @@ -274,6 +274,6 @@ void test_vssseg2e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t 
v0, vuint16m4_t v1, size_t vl) { - return vssseg2e16_v_u16m4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e16_v_u16m4_m(mask, base, bstride, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32.c index 6c021e4a82c8..d421bf71d669 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vssseg2e32_v_f32mf2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_f32mf2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_f32m1( @@ -22,7 +22,7 @@ void test_vssseg2e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vssseg2e32_v_f32m1(base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_f32m1(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_f32m2( @@ -31,7 +31,7 @@ void test_vssseg2e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vssseg2e32_v_f32m2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_f32m2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_f32m4( @@ -40,7 +40,7 @@ void test_vssseg2e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - 
return vssseg2e32_v_f32m4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_f32m4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_i32mf2( @@ -49,7 +49,7 @@ void test_vssseg2e32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vssseg2e32_v_i32mf2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_i32mf2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_i32m1( @@ -58,7 +58,7 @@ void test_vssseg2e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vssseg2e32_v_i32m1(base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_i32m1(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_i32m2( @@ -67,7 +67,7 @@ void test_vssseg2e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vssseg2e32_v_i32m2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_i32m2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_i32m4( @@ -76,7 +76,7 @@ void test_vssseg2e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vssseg2e32_v_i32m4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_i32m4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_u32mf2( @@ -85,7 +85,7 @@ void test_vssseg2e32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, 
vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vssseg2e32_v_u32mf2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_u32mf2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_u32m1( @@ -94,7 +94,7 @@ void test_vssseg2e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vssseg2e32_v_u32m1(base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_u32m1(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_u32m2( @@ -103,7 +103,7 @@ void test_vssseg2e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vssseg2e32_v_u32m2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_u32m2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_u32m4( @@ -112,7 +112,7 @@ void test_vssseg2e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vssseg2e32_v_u32m4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_u32m4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_f32mf2_m( @@ -121,7 +121,7 @@ void test_vssseg2e32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vssseg2e32_v_f32mf2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_f32mf2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_f32m1_m( @@ -130,7 +130,7 @@ void test_vssseg2e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, 
// CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vssseg2e32_v_f32m1_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_f32m1_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_f32m2_m( @@ -139,7 +139,7 @@ void test_vssseg2e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vssseg2e32_v_f32m2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_f32m2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_f32m4_m( @@ -148,7 +148,7 @@ void test_vssseg2e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vssseg2e32_v_f32m4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_f32m4_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_i32mf2_m( @@ -157,7 +157,7 @@ void test_vssseg2e32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride, vf // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vssseg2e32_v_i32mf2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_i32mf2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_i32m1_m( @@ -166,7 +166,7 @@ void test_vssseg2e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vssseg2e32_v_i32m1_m(mask, base, bstride, v0, 
v1, vl); + return __riscv_vssseg2e32_v_i32m1_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_i32m2_m( @@ -175,7 +175,7 @@ void test_vssseg2e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vssseg2e32_v_i32m2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_i32m2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_i32m4_m( @@ -184,7 +184,7 @@ void test_vssseg2e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vssseg2e32_v_i32m4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_i32m4_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_u32mf2_m( @@ -193,7 +193,7 @@ void test_vssseg2e32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vssseg2e32_v_u32mf2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_u32mf2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_u32m1_m( @@ -202,7 +202,7 @@ void test_vssseg2e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vssseg2e32_v_u32m1_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_u32m1_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_u32m2_m( @@ -211,7 +211,7 @@ void test_vssseg2e32_v_u32m1_m(vbool32_t mask, uint32_t 
*base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vssseg2e32_v_u32m2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_u32m2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e32_v_u32m4_m( @@ -220,6 +220,6 @@ void test_vssseg2e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg2e32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vssseg2e32_v_u32m4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e32_v_u32m4_m(mask, base, bstride, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e64.c index a4fb9e9803bb..da19da319bff 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vssseg2e64_v_f64m1(base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_f64m1(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_f64m2( @@ -22,7 +22,7 @@ void test_vssseg2e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vssseg2e64_v_f64m2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_f64m2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_f64m4( @@ -31,7 +31,7 @@ void test_vssseg2e64_v_f64m2(double 
*base, ptrdiff_t bstride, vfloat64m2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vssseg2e64_v_f64m4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_f64m4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_i64m1( @@ -40,7 +40,7 @@ void test_vssseg2e64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vssseg2e64_v_i64m1(base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_i64m1(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_i64m2( @@ -49,7 +49,7 @@ void test_vssseg2e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vssseg2e64_v_i64m2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_i64m2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_i64m4( @@ -58,7 +58,7 @@ void test_vssseg2e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vssseg2e64_v_i64m4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_i64m4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_u64m1( @@ -67,7 +67,7 @@ void test_vssseg2e64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vssseg2e64_v_u64m1(base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_u64m1(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_u64m2( @@ -76,7 +76,7 @@ 
void test_vssseg2e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vssseg2e64_v_u64m2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_u64m2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_u64m4( @@ -85,7 +85,7 @@ void test_vssseg2e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vssseg2e64_v_u64m4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_u64m4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_f64m1_m( @@ -94,7 +94,7 @@ void test_vssseg2e64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vssseg2e64_v_f64m1_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_f64m1_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_f64m2_m( @@ -103,7 +103,7 @@ void test_vssseg2e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vssseg2e64_v_f64m2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_f64m2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_f64m4_m( @@ -112,7 +112,7 @@ void test_vssseg2e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vssseg2e64_v_f64m4_m(mask, base, bstride, 
v0, v1, vl); + return __riscv_vssseg2e64_v_f64m4_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_i64m1_m( @@ -121,7 +121,7 @@ void test_vssseg2e64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vssseg2e64_v_i64m1_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_i64m1_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_i64m2_m( @@ -130,7 +130,7 @@ void test_vssseg2e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vssseg2e64_v_i64m2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_i64m2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_i64m4_m( @@ -139,7 +139,7 @@ void test_vssseg2e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vssseg2e64_v_i64m4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_i64m4_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_u64m1_m( @@ -148,7 +148,7 @@ void test_vssseg2e64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vssseg2e64_v_u64m1_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_u64m1_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_u64m2_m( @@ -157,7 +157,7 @@ void test_vssseg2e64_v_u64m1_m(vbool64_t mask, uint64_t *base, 
ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vssseg2e64_v_u64m2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_u64m2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e64_v_u64m4_m( @@ -166,6 +166,6 @@ void test_vssseg2e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg2e64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vssseg2e64_v_u64m4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e64_v_u64m4_m(mask, base, bstride, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e8.c index 3a5e4a2f9d00..3473fb682495 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg2e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vssseg2e8_v_i8mf8(base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_i8mf8(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vssseg2e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vssseg2e8_v_i8mf4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_i8mf4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vssseg2e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, 
vint8mf4_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vssseg2e8_v_i8mf2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_i8mf2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vssseg2e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vssseg2e8_v_i8m1(base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_i8m1(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_i8m2( @@ -48,7 +48,7 @@ void test_vssseg2e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vssseg2e8_v_i8m2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_i8m2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_i8m4( @@ -57,7 +57,7 @@ void test_vssseg2e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vssseg2e8_v_i8m4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_i8m4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_u8mf8( @@ -66,7 +66,7 @@ void test_vssseg2e8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vssseg2e8_v_u8mf8(base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_u8mf8(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_u8mf4( @@ -75,7 +75,7 @@ void test_vssseg2e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, 
vuint8mf8_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vssseg2e8_v_u8mf4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_u8mf4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_u8mf2( @@ -84,7 +84,7 @@ void test_vssseg2e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vssseg2e8_v_u8mf2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_u8mf2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_u8m1( @@ -93,7 +93,7 @@ void test_vssseg2e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vssseg2e8_v_u8m1(base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_u8m1(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_u8m2( @@ -102,7 +102,7 @@ void test_vssseg2e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vssseg2e8_v_u8m2(base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_u8m2(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_u8m4( @@ -111,7 +111,7 @@ void test_vssseg2e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vssseg2e8_v_u8m4(base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_u8m4(base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_i8mf8_m( @@ -120,7 +120,7 @@ void test_vssseg2e8_v_u8m4(uint8_t *base, ptrdiff_t 
bstride, vuint8m4_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vssseg2e8_v_i8mf8_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_i8mf8_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_i8mf4_m( @@ -129,7 +129,7 @@ void test_vssseg2e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vssseg2e8_v_i8mf4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_i8mf4_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_i8mf2_m( @@ -138,7 +138,7 @@ void test_vssseg2e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vssseg2e8_v_i8mf2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_i8mf2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_i8m1_m( @@ -147,7 +147,7 @@ void test_vssseg2e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vssseg2e8_v_i8m1_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_i8m1_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_i8m2_m( @@ -156,7 +156,7 @@ void test_vssseg2e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vssseg2e8_v_i8m2_m(mask, base, bstride, v0, v1, vl); + return 
__riscv_vssseg2e8_v_i8m2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_i8m4_m( @@ -165,7 +165,7 @@ void test_vssseg2e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vssseg2e8_v_i8m4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_i8m4_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_u8mf8_m( @@ -174,7 +174,7 @@ void test_vssseg2e8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vssseg2e8_v_u8mf8_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_u8mf8_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_u8mf4_m( @@ -183,7 +183,7 @@ void test_vssseg2e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vssseg2e8_v_u8mf4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_u8mf4_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_u8mf2_m( @@ -192,7 +192,7 @@ void test_vssseg2e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vssseg2e8_v_u8mf2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_u8mf2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_u8m1_m( @@ -201,7 +201,7 @@ void test_vssseg2e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // 
void test_vssseg2e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vssseg2e8_v_u8m1_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_u8m1_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_u8m2_m( @@ -210,7 +210,7 @@ void test_vssseg2e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vu // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vssseg2e8_v_u8m2_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_u8m2_m(mask, base, bstride, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vssseg2e8_v_u8m4_m( @@ -219,6 +219,6 @@ void test_vssseg2e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vu // CHECK-RV64-NEXT: ret void // void test_vssseg2e8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vssseg2e8_v_u8m4_m(mask, base, bstride, v0, v1, vl); + return __riscv_vssseg2e8_v_u8m4_m(mask, base, bstride, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e16.c index a4cb9231f716..7700a16da917 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vssseg3e16_v_f16mf4(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_f16mf4(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vssseg3e16_v_f16mf4(_Float16 *base, ptrdiff_t 
bstride, vfloat16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vssseg3e16_v_f16mf2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_f16mf2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vssseg3e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vssseg3e16_v_f16m1(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_f16m1(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2( @@ -40,7 +40,7 @@ void test_vssseg3e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vssseg3e16_v_f16m2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_f16m2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_i16mf4( @@ -49,7 +49,7 @@ void test_vssseg3e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vssseg3e16_v_i16mf4(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_i16mf4(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_i16mf2( @@ -58,7 +58,7 @@ void test_vssseg3e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vssseg3e16_v_i16mf2(base, bstride, v0, v1, v2, vl); + 
return __riscv_vssseg3e16_v_i16mf2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_i16m1( @@ -67,7 +67,7 @@ void test_vssseg3e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vssseg3e16_v_i16m1(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_i16m1(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_i16m2( @@ -76,7 +76,7 @@ void test_vssseg3e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vssseg3e16_v_i16m2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_i16m2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_u16mf4( @@ -85,7 +85,7 @@ void test_vssseg3e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vssseg3e16_v_u16mf4(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_u16mf4(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_u16mf2( @@ -94,7 +94,7 @@ void test_vssseg3e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vssseg3e16_v_u16mf2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_u16mf2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_u16m1( @@ -103,7 +103,7 @@ void test_vssseg3e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void 
test_vssseg3e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vssseg3e16_v_u16m1(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_u16m1(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_u16m2( @@ -112,7 +112,7 @@ void test_vssseg3e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vssseg3e16_v_u16m2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_u16m2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf4_m( @@ -121,7 +121,7 @@ void test_vssseg3e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vssseg3e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2_m( @@ -130,7 +130,7 @@ void test_vssseg3e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vssseg3e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1_m( @@ -139,7 +139,7 @@ void test_vssseg3e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return 
vssseg3e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2_m( @@ -148,7 +148,7 @@ void test_vssseg3e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vssseg3e16_v_f16m2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_f16m2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_i16mf4_m( @@ -157,7 +157,7 @@ void test_vssseg3e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vssseg3e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_i16mf2_m( @@ -166,7 +166,7 @@ void test_vssseg3e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vssseg3e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_i16m1_m( @@ -175,7 +175,7 @@ void test_vssseg3e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vssseg3e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_i16m1_m(mask, base, 
bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_i16m2_m( @@ -184,7 +184,7 @@ void test_vssseg3e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vssseg3e16_v_i16m2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_i16m2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_u16mf4_m( @@ -193,7 +193,7 @@ void test_vssseg3e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vssseg3e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_u16mf2_m( @@ -202,7 +202,7 @@ void test_vssseg3e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vssseg3e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_u16m1_m( @@ -211,7 +211,7 @@ void test_vssseg3e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vssseg3e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e16_v_u16m2_m( @@ -220,6 +220,6 @@ void 
test_vssseg3e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg3e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vssseg3e16_v_u16m2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e16_v_u16m2_m(mask, base, bstride, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e32.c index baeaf7371861..0318f55e1b26 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vssseg3e32_v_f32mf2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_f32mf2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_f32m1( @@ -22,7 +22,7 @@ void test_vssseg3e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vssseg3e32_v_f32m1(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_f32m1(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_f32m2( @@ -31,7 +31,7 @@ void test_vssseg3e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vssseg3e32_v_f32m2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_f32m2(base, bstride, v0, v1, 
v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_i32mf2( @@ -40,7 +40,7 @@ void test_vssseg3e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vssseg3e32_v_i32mf2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_i32mf2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_i32m1( @@ -49,7 +49,7 @@ void test_vssseg3e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vssseg3e32_v_i32m1(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_i32m1(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_i32m2( @@ -58,7 +58,7 @@ void test_vssseg3e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vssseg3e32_v_i32m2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_i32m2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_u32mf2( @@ -67,7 +67,7 @@ void test_vssseg3e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vssseg3e32_v_u32mf2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_u32mf2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_u32m1( @@ -76,7 +76,7 @@ void test_vssseg3e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t 
v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vssseg3e32_v_u32m1(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_u32m1(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_u32m2( @@ -85,7 +85,7 @@ void test_vssseg3e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vssseg3e32_v_u32m2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_u32m2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_f32mf2_m( @@ -94,7 +94,7 @@ void test_vssseg3e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vssseg3e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_f32m1_m( @@ -103,7 +103,7 @@ void test_vssseg3e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vssseg3e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_f32m2_m( @@ -112,7 +112,7 @@ void test_vssseg3e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vssseg3e32_v_f32m2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_f32m2_m(mask, base, 
bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_i32mf2_m( @@ -121,7 +121,7 @@ void test_vssseg3e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vssseg3e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_i32m1_m( @@ -130,7 +130,7 @@ void test_vssseg3e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vssseg3e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_i32m2_m( @@ -139,7 +139,7 @@ void test_vssseg3e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vssseg3e32_v_i32m2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_i32m2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_u32mf2_m( @@ -148,7 +148,7 @@ void test_vssseg3e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vssseg3e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_u32m1_m( @@ -157,7 +157,7 @@ void 
test_vssseg3e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vssseg3e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e32_v_u32m2_m( @@ -166,6 +166,6 @@ void test_vssseg3e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg3e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vssseg3e32_v_u32m2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e32_v_u32m2_m(mask, base, bstride, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e64.c index 4910e3bce02e..66571a3013ef 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg3e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vssseg3e64_v_f64m1(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e64_v_f64m1(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e64_v_f64m2( @@ -22,7 +22,7 @@ void test_vssseg3e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vssseg3e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vssseg3e64_v_f64m2(base, bstride, v0, v1, v2, vl); + return 
__riscv_vssseg3e64_v_f64m2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e64_v_i64m1( @@ -31,7 +31,7 @@ void test_vssseg3e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vssseg3e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vssseg3e64_v_i64m1(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e64_v_i64m1(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e64_v_i64m2( @@ -40,7 +40,7 @@ void test_vssseg3e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg3e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vssseg3e64_v_i64m2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e64_v_i64m2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e64_v_u64m1( @@ -49,7 +49,7 @@ void test_vssseg3e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg3e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vssseg3e64_v_u64m1(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e64_v_u64m1(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e64_v_u64m2( @@ -58,7 +58,7 @@ void test_vssseg3e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg3e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vssseg3e64_v_u64m2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e64_v_u64m2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e64_v_f64m1_m( @@ -67,7 +67,7 @@ void test_vssseg3e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, // CHECK-RV64-NEXT: ret void // void 
test_vssseg3e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vssseg3e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e64_v_f64m2_m( @@ -76,7 +76,7 @@ void test_vssseg3e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg3e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vssseg3e64_v_f64m2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e64_v_f64m2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e64_v_i64m1_m( @@ -85,7 +85,7 @@ void test_vssseg3e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg3e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vssseg3e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e64_v_i64m2_m( @@ -94,7 +94,7 @@ void test_vssseg3e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg3e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vssseg3e64_v_i64m2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e64_v_i64m2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e64_v_u64m1_m( @@ -103,7 +103,7 @@ void test_vssseg3e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg3e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, 
size_t vl) { - return vssseg3e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e64_v_u64m2_m( @@ -112,6 +112,6 @@ void test_vssseg3e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg3e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vssseg3e64_v_u64m2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e64_v_u64m2_m(mask, base, bstride, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e8.c index 2ef34c68063b..d2dcf9c83e02 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg3e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vssseg3e8_v_i8mf8(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_i8mf8(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vssseg3e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vssseg3e8_v_i8mf4(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_i8mf4(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vssseg3e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_i8mf2(int8_t 
*base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vssseg3e8_v_i8mf2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_i8mf2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vssseg3e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vssseg3e8_v_i8m1(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_i8m1(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_i8m2( @@ -48,7 +48,7 @@ void test_vssseg3e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vssseg3e8_v_i8m2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_i8m2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_u8mf8( @@ -57,7 +57,7 @@ void test_vssseg3e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vssseg3e8_v_u8mf8(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_u8mf8(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_u8mf4( @@ -66,7 +66,7 @@ void test_vssseg3e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vssseg3e8_v_u8mf4(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_u8mf4(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_u8mf2( @@ -75,7 +75,7 @@ void 
test_vssseg3e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vssseg3e8_v_u8mf2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_u8mf2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_u8m1( @@ -84,7 +84,7 @@ void test_vssseg3e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vssseg3e8_v_u8m1(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_u8m1(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_u8m2( @@ -93,7 +93,7 @@ void test_vssseg3e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vssseg3e8_v_u8m2(base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_u8m2(base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_i8mf8_m( @@ -102,7 +102,7 @@ void test_vssseg3e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vssseg3e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_i8mf4_m( @@ -111,7 +111,7 @@ void test_vssseg3e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return 
vssseg3e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_i8mf2_m( @@ -120,7 +120,7 @@ void test_vssseg3e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vssseg3e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_i8m1_m( @@ -129,7 +129,7 @@ void test_vssseg3e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vssseg3e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_i8m2_m( @@ -138,7 +138,7 @@ void test_vssseg3e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vssseg3e8_v_i8m2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_i8m2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_u8mf8_m( @@ -147,7 +147,7 @@ void test_vssseg3e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vssseg3e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vssseg3e8_v_u8mf4_m( @@ -156,7 +156,7 @@ void test_vssseg3e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vssseg3e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_u8mf2_m( @@ -165,7 +165,7 @@ void test_vssseg3e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vssseg3e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_u8m1_m( @@ -174,7 +174,7 @@ void test_vssseg3e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vssseg3e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vssseg3e8_v_u8m2_m( @@ -183,6 +183,6 @@ void test_vssseg3e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vu // CHECK-RV64-NEXT: ret void // void test_vssseg3e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vssseg3e8_v_u8m2_m(mask, base, bstride, v0, v1, v2, vl); + return __riscv_vssseg3e8_v_u8m2_m(mask, base, bstride, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e16.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e16.c index 05c281227f49..9fd720cae67a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vssseg4e16_v_f16mf4(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_f16mf4(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vssseg4e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vssseg4e16_v_f16mf2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_f16mf2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vssseg4e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vssseg4e16_v_f16m1(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_f16m1(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m2( @@ -40,7 +40,7 @@ void test_vssseg4e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vssseg4e16_v_f16m2(base, bstride, v0, v1, v2, v3, vl); + return 
__riscv_vssseg4e16_v_f16m2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_i16mf4( @@ -49,7 +49,7 @@ void test_vssseg4e16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vssseg4e16_v_i16mf4(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_i16mf4(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_i16mf2( @@ -58,7 +58,7 @@ void test_vssseg4e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vssseg4e16_v_i16mf2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_i16mf2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_i16m1( @@ -67,7 +67,7 @@ void test_vssseg4e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vssseg4e16_v_i16m1(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_i16m1(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_i16m2( @@ -76,7 +76,7 @@ void test_vssseg4e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vssseg4e16_v_i16m2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_i16m2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_u16mf4( @@ -85,7 +85,7 @@ void test_vssseg4e16_v_i16m2(int16_t *base, 
ptrdiff_t bstride, vint16m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vssseg4e16_v_u16mf4(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_u16mf4(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_u16mf2( @@ -94,7 +94,7 @@ void test_vssseg4e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vssseg4e16_v_u16mf2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_u16mf2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_u16m1( @@ -103,7 +103,7 @@ void test_vssseg4e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vssseg4e16_v_u16m1(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_u16m1(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_u16m2( @@ -112,7 +112,7 @@ void test_vssseg4e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vssseg4e16_v_u16m2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_u16m2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf4_m( @@ -121,7 +121,7 @@ void test_vssseg4e16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t 
bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vssseg4e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2_m( @@ -130,7 +130,7 @@ void test_vssseg4e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vssseg4e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1_m( @@ -139,7 +139,7 @@ void test_vssseg4e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vssseg4e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m2_m( @@ -148,7 +148,7 @@ void test_vssseg4e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vssseg4e16_v_f16m2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_f16m2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_i16mf4_m( @@ -157,7 +157,7 @@ void test_vssseg4e16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_i16mf4_m(vbool64_t mask, int16_t *base, 
ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vssseg4e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_i16mf2_m( @@ -166,7 +166,7 @@ void test_vssseg4e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vssseg4e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_i16m1_m( @@ -175,7 +175,7 @@ void test_vssseg4e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vssseg4e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_i16m2_m( @@ -184,7 +184,7 @@ void test_vssseg4e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vssseg4e16_v_i16m2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_i16m2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_u16mf4_m( @@ -193,7 +193,7 @@ void test_vssseg4e16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, 
vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vssseg4e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_u16mf2_m( @@ -202,7 +202,7 @@ void test_vssseg4e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vssseg4e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_u16m1_m( @@ -211,7 +211,7 @@ void test_vssseg4e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vssseg4e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e16_v_u16m2_m( @@ -220,6 +220,6 @@ void test_vssseg4e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg4e16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vssseg4e16_v_u16m2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e16_v_u16m2_m(mask, base, bstride, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e32.c index 810dd8ad7932..9fe084ef7eff 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vssseg4e32_v_f32mf2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_f32mf2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_f32m1( @@ -22,7 +22,7 @@ void test_vssseg4e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vssseg4e32_v_f32m1(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_f32m1(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_f32m2( @@ -31,7 +31,7 @@ void test_vssseg4e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vssseg4e32_v_f32m2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_f32m2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_i32mf2( @@ -40,7 +40,7 @@ void test_vssseg4e32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vssseg4e32_v_i32mf2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_i32mf2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_i32m1( @@ -49,7 +49,7 @@ void 
test_vssseg4e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vssseg4e32_v_i32m1(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_i32m1(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_i32m2( @@ -58,7 +58,7 @@ void test_vssseg4e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vssseg4e32_v_i32m2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_i32m2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_u32mf2( @@ -67,7 +67,7 @@ void test_vssseg4e32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vssseg4e32_v_u32mf2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_u32mf2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_u32m1( @@ -76,7 +76,7 @@ void test_vssseg4e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vssseg4e32_v_u32m1(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_u32m1(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_u32m2( @@ -85,7 +85,7 @@ void test_vssseg4e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, 
vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vssseg4e32_v_u32m2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_u32m2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_f32mf2_m( @@ -94,7 +94,7 @@ void test_vssseg4e32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vssseg4e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_f32m1_m( @@ -103,7 +103,7 @@ void test_vssseg4e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vssseg4e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_f32m2_m( @@ -112,7 +112,7 @@ void test_vssseg4e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vssseg4e32_v_f32m2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_f32m2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_i32mf2_m( @@ -121,7 +121,7 @@ void test_vssseg4e32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t 
v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vssseg4e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_i32m1_m( @@ -130,7 +130,7 @@ void test_vssseg4e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vssseg4e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_i32m2_m( @@ -139,7 +139,7 @@ void test_vssseg4e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vssseg4e32_v_i32m2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_i32m2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_u32mf2_m( @@ -148,7 +148,7 @@ void test_vssseg4e32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vssseg4e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_u32m1_m( @@ -157,7 +157,7 @@ void test_vssseg4e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, 
vuint32m1_t v3, size_t vl) { - return vssseg4e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e32_v_u32m2_m( @@ -166,6 +166,6 @@ void test_vssseg4e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg4e32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vssseg4e32_v_u32m2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e32_v_u32m2_m(mask, base, bstride, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e64.c index b41b96f805c0..1a37521e2cfd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg4e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vssseg4e64_v_f64m1(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e64_v_f64m1(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e64_v_f64m2( @@ -22,7 +22,7 @@ void test_vssseg4e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vssseg4e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vssseg4e64_v_f64m2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e64_v_f64m2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e64_v_i64m1( @@ -31,7 +31,7 @@ void 
test_vssseg4e64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vssseg4e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vssseg4e64_v_i64m1(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e64_v_i64m1(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e64_v_i64m2( @@ -40,7 +40,7 @@ void test_vssseg4e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg4e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vssseg4e64_v_i64m2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e64_v_i64m2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e64_v_u64m1( @@ -49,7 +49,7 @@ void test_vssseg4e64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg4e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vssseg4e64_v_u64m1(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e64_v_u64m1(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e64_v_u64m2( @@ -58,7 +58,7 @@ void test_vssseg4e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg4e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vssseg4e64_v_u64m2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e64_v_u64m2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e64_v_f64m1_m( @@ -67,7 +67,7 @@ void test_vssseg4e64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg4e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t 
bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vssseg4e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e64_v_f64m2_m( @@ -76,7 +76,7 @@ void test_vssseg4e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg4e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vssseg4e64_v_f64m2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e64_v_f64m2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e64_v_i64m1_m( @@ -85,7 +85,7 @@ void test_vssseg4e64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg4e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vssseg4e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e64_v_i64m2_m( @@ -94,7 +94,7 @@ void test_vssseg4e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg4e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vssseg4e64_v_i64m2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e64_v_i64m2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e64_v_u64m1_m( @@ -103,7 +103,7 @@ void test_vssseg4e64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg4e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, 
vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vssseg4e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e64_v_u64m2_m( @@ -112,6 +112,6 @@ void test_vssseg4e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg4e64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vssseg4e64_v_u64m2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e64_v_u64m2_m(mask, base, bstride, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e8.c index 331f11a72c6a..5b0b96a95148 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg4e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vssseg4e8_v_i8mf8(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_i8mf8(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vssseg4e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vssseg4e8_v_i8mf4(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_i8mf4(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_i8mf2( @@ -30,7 +30,7 @@ void 
test_vssseg4e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vssseg4e8_v_i8mf2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_i8mf2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vssseg4e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vssseg4e8_v_i8m1(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_i8m1(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_i8m2( @@ -48,7 +48,7 @@ void test_vssseg4e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vssseg4e8_v_i8m2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_i8m2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_u8mf8( @@ -57,7 +57,7 @@ void test_vssseg4e8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vssseg4e8_v_u8mf8(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_u8mf8(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_u8mf4( @@ -66,7 +66,7 @@ void test_vssseg4e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, 
vuint8mf4_t v3, size_t vl) { - return vssseg4e8_v_u8mf4(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_u8mf4(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_u8mf2( @@ -75,7 +75,7 @@ void test_vssseg4e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vssseg4e8_v_u8mf2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_u8mf2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_u8m1( @@ -84,7 +84,7 @@ void test_vssseg4e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vssseg4e8_v_u8m1(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_u8m1(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_u8m2( @@ -93,7 +93,7 @@ void test_vssseg4e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vssseg4e8_v_u8m2(base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_u8m2(base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_i8mf8_m( @@ -102,7 +102,7 @@ void test_vssseg4e8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vssseg4e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, vl); } // 
CHECK-RV64-LABEL: @test_vssseg4e8_v_i8mf4_m( @@ -111,7 +111,7 @@ void test_vssseg4e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vssseg4e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_i8mf2_m( @@ -120,7 +120,7 @@ void test_vssseg4e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vssseg4e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_i8m1_m( @@ -129,7 +129,7 @@ void test_vssseg4e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vssseg4e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_i8m2_m( @@ -138,7 +138,7 @@ void test_vssseg4e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vssseg4e8_v_i8m2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_i8m2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_u8mf8_m( @@ -147,7 +147,7 @@ void 
test_vssseg4e8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vssseg4e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_u8mf4_m( @@ -156,7 +156,7 @@ void test_vssseg4e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vssseg4e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_u8mf2_m( @@ -165,7 +165,7 @@ void test_vssseg4e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vssseg4e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_u8m1_m( @@ -174,7 +174,7 @@ void test_vssseg4e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vssseg4e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vssseg4e8_v_u8m2_m( @@ -183,6 +183,6 @@ void test_vssseg4e8_v_u8m1_m(vbool8_t mask, uint8_t 
*base, ptrdiff_t bstride, vu // CHECK-RV64-NEXT: ret void // void test_vssseg4e8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vssseg4e8_v_u8m2_m(mask, base, bstride, v0, v1, v2, v3, vl); + return __riscv_vssseg4e8_v_u8m2_m(mask, base, bstride, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e16.c index b932b38961cd..5ed2fbe4730c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vssseg5e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vssseg5e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vssseg5e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vssseg5e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) 
{ - return vssseg5e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_i16mf4( @@ -40,7 +40,7 @@ void test_vssseg5e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vssseg5e16_v_i16mf4(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_i16mf4(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_i16mf2( @@ -49,7 +49,7 @@ void test_vssseg5e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vssseg5e16_v_i16mf2(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_i16mf2(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_i16m1( @@ -58,7 +58,7 @@ void test_vssseg5e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vssseg5e16_v_i16m1(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_i16m1(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_u16mf4( @@ -67,7 +67,7 @@ void test_vssseg5e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vssseg5e16_v_u16mf4(base, bstride, v0, v1, v2, v3, v4, vl); 
+ return __riscv_vssseg5e16_v_u16mf4(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_u16mf2( @@ -76,7 +76,7 @@ void test_vssseg5e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vssseg5e16_v_u16mf2(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_u16mf2(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_u16m1( @@ -85,7 +85,7 @@ void test_vssseg5e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vssseg5e16_v_u16m1(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_u16m1(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf4_m( @@ -94,7 +94,7 @@ void test_vssseg5e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vssseg5e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2_m( @@ -103,7 +103,7 @@ void test_vssseg5e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vssseg5e16_v_f16mf2_m(mask, base, 
bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1_m( @@ -112,7 +112,7 @@ void test_vssseg5e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vssseg5e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_i16mf4_m( @@ -121,7 +121,7 @@ void test_vssseg5e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vssseg5e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_i16mf2_m( @@ -130,7 +130,7 @@ void test_vssseg5e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vssseg5e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_i16m1_m( @@ -139,7 +139,7 @@ void test_vssseg5e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, 
vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vssseg5e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_u16mf4_m( @@ -148,7 +148,7 @@ void test_vssseg5e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vssseg5e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_u16mf2_m( @@ -157,7 +157,7 @@ void test_vssseg5e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vssseg5e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e16_v_u16m1_m( @@ -166,6 +166,6 @@ void test_vssseg5e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg5e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vssseg5e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e32.c index 
bbc7ec104b7e..0a1b5aaa8641 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg5e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vssseg5e32_v_f32mf2(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e32_v_f32mf2(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e32_v_f32m1( @@ -22,7 +22,7 @@ void test_vssseg5e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg5e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vssseg5e32_v_f32m1(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e32_v_f32m1(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e32_v_i32mf2( @@ -31,7 +31,7 @@ void test_vssseg5e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vssseg5e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vssseg5e32_v_i32mf2(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e32_v_i32mf2(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e32_v_i32m1( @@ -40,7 +40,7 @@ void test_vssseg5e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg5e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vssseg5e32_v_i32m1(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e32_v_i32m1(base, 
bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e32_v_u32mf2( @@ -49,7 +49,7 @@ void test_vssseg5e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg5e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vssseg5e32_v_u32mf2(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e32_v_u32mf2(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e32_v_u32m1( @@ -58,7 +58,7 @@ void test_vssseg5e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg5e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vssseg5e32_v_u32m1(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e32_v_u32m1(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e32_v_f32mf2_m( @@ -67,7 +67,7 @@ void test_vssseg5e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg5e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vssseg5e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e32_v_f32m1_m( @@ -76,7 +76,7 @@ void test_vssseg5e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg5e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vssseg5e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return 
__riscv_vssseg5e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e32_v_i32mf2_m( @@ -85,7 +85,7 @@ void test_vssseg5e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg5e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vssseg5e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e32_v_i32m1_m( @@ -94,7 +94,7 @@ void test_vssseg5e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg5e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vssseg5e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e32_v_u32mf2_m( @@ -103,7 +103,7 @@ void test_vssseg5e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg5e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vssseg5e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e32_v_u32m1_m( @@ -112,6 +112,6 @@ void test_vssseg5e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg5e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - 
return vssseg5e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e64.c index 60e7bdc5889e..95cbae1ed6b3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg5e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vssseg5e64_v_f64m1(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e64_v_f64m1(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e64_v_i64m1( @@ -22,7 +22,7 @@ void test_vssseg5e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vssseg5e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vssseg5e64_v_i64m1(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e64_v_i64m1(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e64_v_u64m1( @@ -31,7 +31,7 @@ void test_vssseg5e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg5e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vssseg5e64_v_u64m1(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e64_v_u64m1(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e64_v_f64m1_m( @@ -40,7 +40,7 @@ void 
test_vssseg5e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg5e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vssseg5e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e64_v_i64m1_m( @@ -49,7 +49,7 @@ void test_vssseg5e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg5e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vssseg5e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e64_v_u64m1_m( @@ -58,6 +58,6 @@ void test_vssseg5e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg5e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vssseg5e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e8.c index 3cb2959a7ac6..37f33c418859 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg5e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_i8mf8(int8_t 
*base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vssseg5e8_v_i8mf8(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_i8mf8(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vssseg5e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vssseg5e8_v_i8mf4(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_i8mf4(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vssseg5e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vssseg5e8_v_i8mf2(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_i8mf2(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vssseg5e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vssseg5e8_v_i8m1(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_i8m1(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_u8mf8( @@ -48,7 +48,7 @@ void test_vssseg5e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return 
vssseg5e8_v_u8mf8(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_u8mf8(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_u8mf4( @@ -57,7 +57,7 @@ void test_vssseg5e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vssseg5e8_v_u8mf4(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_u8mf4(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_u8mf2( @@ -66,7 +66,7 @@ void test_vssseg5e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vssseg5e8_v_u8mf2(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_u8mf2(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_u8m1( @@ -75,7 +75,7 @@ void test_vssseg5e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vssseg5e8_v_u8m1(base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_u8m1(base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_i8mf8_m( @@ -84,7 +84,7 @@ void test_vssseg5e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vssseg5e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return 
__riscv_vssseg5e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_i8mf4_m( @@ -93,7 +93,7 @@ void test_vssseg5e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vssseg5e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_i8mf2_m( @@ -102,7 +102,7 @@ void test_vssseg5e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vssseg5e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_i8m1_m( @@ -111,7 +111,7 @@ void test_vssseg5e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vssseg5e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_u8mf8_m( @@ -120,7 +120,7 @@ void test_vssseg5e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vssseg5e8_v_u8mf8_m(mask, base, bstride, v0, v1, 
v2, v3, v4, vl); + return __riscv_vssseg5e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_u8mf4_m( @@ -129,7 +129,7 @@ void test_vssseg5e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vssseg5e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_u8mf2_m( @@ -138,7 +138,7 @@ void test_vssseg5e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vssseg5e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vssseg5e8_v_u8m1_m( @@ -147,6 +147,6 @@ void test_vssseg5e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg5e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vssseg5e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); + return __riscv_vssseg5e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e16.c index 98462937bebf..fd91eeb5eab3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vssseg6e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vssseg6e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vssseg6e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vssseg6e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vssseg6e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_i16mf4( @@ -40,7 +40,7 @@ void test_vssseg6e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vssseg6e16_v_i16mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_i16mf4(base, bstride, v0, 
v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_i16mf2( @@ -49,7 +49,7 @@ void test_vssseg6e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vssseg6e16_v_i16mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_i16mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_i16m1( @@ -58,7 +58,7 @@ void test_vssseg6e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vssseg6e16_v_i16m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_i16m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_u16mf4( @@ -67,7 +67,7 @@ void test_vssseg6e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vssseg6e16_v_u16mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_u16mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_u16mf2( @@ -76,7 +76,7 @@ void test_vssseg6e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vssseg6e16_v_u16mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return 
__riscv_vssseg6e16_v_u16mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_u16m1( @@ -85,7 +85,7 @@ void test_vssseg6e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vssseg6e16_v_u16m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_u16m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf4_m( @@ -94,7 +94,7 @@ void test_vssseg6e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vssseg6e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2_m( @@ -103,7 +103,7 @@ void test_vssseg6e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vssseg6e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1_m( @@ -112,7 +112,7 @@ void test_vssseg6e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, 
vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vssseg6e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_i16mf4_m( @@ -121,7 +121,7 @@ void test_vssseg6e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vssseg6e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_i16mf2_m( @@ -130,7 +130,7 @@ void test_vssseg6e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vssseg6e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_i16m1_m( @@ -139,7 +139,7 @@ void test_vssseg6e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vssseg6e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_u16mf4_m( @@ -148,7 +148,7 @@ void test_vssseg6e16_v_i16m1_m(vbool16_t mask, 
int16_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vssseg6e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_u16mf2_m( @@ -157,7 +157,7 @@ void test_vssseg6e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vssseg6e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e16_v_u16m1_m( @@ -166,6 +166,6 @@ void test_vssseg6e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg6e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vssseg6e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e32.c index 25fcd837d1d7..c03ed5d397ee 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e32.c @@ -13,7 +13,7 @@ // 
CHECK-RV64-NEXT: ret void // void test_vssseg6e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vssseg6e32_v_f32mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e32_v_f32mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e32_v_f32m1( @@ -22,7 +22,7 @@ void test_vssseg6e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg6e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vssseg6e32_v_f32m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e32_v_f32m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e32_v_i32mf2( @@ -31,7 +31,7 @@ void test_vssseg6e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vssseg6e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vssseg6e32_v_i32mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e32_v_i32mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e32_v_i32m1( @@ -40,7 +40,7 @@ void test_vssseg6e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg6e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vssseg6e32_v_i32m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e32_v_i32m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e32_v_u32mf2( @@ -49,7 +49,7 @@ void test_vssseg6e32_v_i32m1(int32_t *base, ptrdiff_t 
bstride, vint32m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg6e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vssseg6e32_v_u32mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e32_v_u32mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e32_v_u32m1( @@ -58,7 +58,7 @@ void test_vssseg6e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg6e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vssseg6e32_v_u32m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e32_v_u32m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e32_v_f32mf2_m( @@ -67,7 +67,7 @@ void test_vssseg6e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg6e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vssseg6e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e32_v_f32m1_m( @@ -76,7 +76,7 @@ void test_vssseg6e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg6e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vssseg6e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // 
CHECK-RV64-LABEL: @test_vssseg6e32_v_i32mf2_m( @@ -85,7 +85,7 @@ void test_vssseg6e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg6e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vssseg6e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e32_v_i32m1_m( @@ -94,7 +94,7 @@ void test_vssseg6e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg6e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vssseg6e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e32_v_u32mf2_m( @@ -103,7 +103,7 @@ void test_vssseg6e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg6e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vssseg6e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e32_v_u32m1_m( @@ -112,6 +112,6 @@ void test_vssseg6e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg6e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) 
{ - return vssseg6e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e64.c index 712d32ada690..0dff3c21dc14 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg6e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vssseg6e64_v_f64m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e64_v_f64m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e64_v_i64m1( @@ -22,7 +22,7 @@ void test_vssseg6e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vssseg6e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vssseg6e64_v_i64m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e64_v_i64m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e64_v_u64m1( @@ -31,7 +31,7 @@ void test_vssseg6e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg6e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vssseg6e64_v_u64m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e64_v_u64m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // 
CHECK-RV64-LABEL: @test_vssseg6e64_v_f64m1_m( @@ -40,7 +40,7 @@ void test_vssseg6e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg6e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vssseg6e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e64_v_i64m1_m( @@ -49,7 +49,7 @@ void test_vssseg6e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg6e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vssseg6e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e64_v_u64m1_m( @@ -58,6 +58,6 @@ void test_vssseg6e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg6e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vssseg6e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e8.c index cecd6392e5ae..a42e1f47f6d3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e8.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg6e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vssseg6e8_v_i8mf8(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_i8mf8(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vssseg6e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vssseg6e8_v_i8mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_i8mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vssseg6e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vssseg6e8_v_i8mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_i8mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vssseg6e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vssseg6e8_v_i8m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_i8m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_u8mf8( @@ -48,7 +48,7 @@ void 
test_vssseg6e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vssseg6e8_v_u8mf8(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_u8mf8(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_u8mf4( @@ -57,7 +57,7 @@ void test_vssseg6e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vssseg6e8_v_u8mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_u8mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_u8mf2( @@ -66,7 +66,7 @@ void test_vssseg6e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vssseg6e8_v_u8mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_u8mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_u8m1( @@ -75,7 +75,7 @@ void test_vssseg6e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vssseg6e8_v_u8m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_u8m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_i8mf8_m( @@ -84,7 +84,7 @@ void 
test_vssseg6e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vssseg6e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_i8mf4_m( @@ -93,7 +93,7 @@ void test_vssseg6e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vssseg6e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_i8mf2_m( @@ -102,7 +102,7 @@ void test_vssseg6e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vssseg6e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_i8m1_m( @@ -111,7 +111,7 @@ void test_vssseg6e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vssseg6e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_i8m1_m(mask, base, 
bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_u8mf8_m( @@ -120,7 +120,7 @@ void test_vssseg6e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vssseg6e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_u8mf4_m( @@ -129,7 +129,7 @@ void test_vssseg6e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vssseg6e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_u8mf2_m( @@ -138,7 +138,7 @@ void test_vssseg6e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vssseg6e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vssseg6e8_v_u8m1_m( @@ -147,6 +147,6 @@ void test_vssseg6e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg6e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, 
size_t vl) { - return vssseg6e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vssseg6e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e16.c index 5fbe17427a30..bba913c3fb4f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vssseg7e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vssseg7e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vssseg7e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vssseg7e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vssseg7e16_v_f16m1(base, bstride, 
v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_i16mf4( @@ -40,7 +40,7 @@ void test_vssseg7e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vssseg7e16_v_i16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_i16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_i16mf2( @@ -49,7 +49,7 @@ void test_vssseg7e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vssseg7e16_v_i16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_i16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_i16m1( @@ -58,7 +58,7 @@ void test_vssseg7e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vssseg7e16_v_i16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_i16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_u16mf4( @@ -67,7 +67,7 @@ void test_vssseg7e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, 
vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vssseg7e16_v_u16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_u16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_u16mf2( @@ -76,7 +76,7 @@ void test_vssseg7e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vssseg7e16_v_u16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_u16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_u16m1( @@ -85,7 +85,7 @@ void test_vssseg7e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vssseg7e16_v_u16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_u16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf4_m( @@ -94,7 +94,7 @@ void test_vssseg7e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vssseg7e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2_m( @@ -103,7 +103,7 @@ void 
test_vssseg7e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vssseg7e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1_m( @@ -112,7 +112,7 @@ void test_vssseg7e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vssseg7e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_i16mf4_m( @@ -121,7 +121,7 @@ void test_vssseg7e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vssseg7e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_i16mf2_m( @@ -130,7 +130,7 @@ void test_vssseg7e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, 
vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vssseg7e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_i16m1_m( @@ -139,7 +139,7 @@ void test_vssseg7e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vssseg7e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_u16mf4_m( @@ -148,7 +148,7 @@ void test_vssseg7e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vssseg7e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_u16mf2_m( @@ -157,7 +157,7 @@ void test_vssseg7e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vssseg7e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e16_v_u16m1_m( @@ -166,6 
+166,6 @@ void test_vssseg7e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg7e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vssseg7e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e32.c index c7a2d9609794..6346b5b540f2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg7e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vssseg7e32_v_f32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e32_v_f32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e32_v_f32m1( @@ -22,7 +22,7 @@ void test_vssseg7e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg7e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vssseg7e32_v_f32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e32_v_f32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e32_v_i32mf2( @@ -31,7 +31,7 @@ void test_vssseg7e32_v_f32m1(float 
*base, ptrdiff_t bstride, vfloat32m1_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vssseg7e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vssseg7e32_v_i32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e32_v_i32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e32_v_i32m1( @@ -40,7 +40,7 @@ void test_vssseg7e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg7e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vssseg7e32_v_i32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e32_v_i32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e32_v_u32mf2( @@ -49,7 +49,7 @@ void test_vssseg7e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg7e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vssseg7e32_v_u32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e32_v_u32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e32_v_u32m1( @@ -58,7 +58,7 @@ void test_vssseg7e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg7e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vssseg7e32_v_u32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e32_v_u32m1(base, bstride, v0, v1, v2, 
v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e32_v_f32mf2_m( @@ -67,7 +67,7 @@ void test_vssseg7e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg7e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vssseg7e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e32_v_f32m1_m( @@ -76,7 +76,7 @@ void test_vssseg7e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg7e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vssseg7e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e32_v_i32mf2_m( @@ -85,7 +85,7 @@ void test_vssseg7e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg7e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vssseg7e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e32_v_i32m1_m( @@ -94,7 +94,7 @@ void test_vssseg7e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg7e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, 
vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vssseg7e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e32_v_u32mf2_m( @@ -103,7 +103,7 @@ void test_vssseg7e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg7e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vssseg7e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e32_v_u32m1_m( @@ -112,6 +112,6 @@ void test_vssseg7e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg7e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vssseg7e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e64.c index 596f02cec027..129a378e6513 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg7e64_v_f64m1(double *base, ptrdiff_t bstride, 
vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vssseg7e64_v_f64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e64_v_f64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e64_v_i64m1( @@ -22,7 +22,7 @@ void test_vssseg7e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vssseg7e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vssseg7e64_v_i64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e64_v_i64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e64_v_u64m1( @@ -31,7 +31,7 @@ void test_vssseg7e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg7e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vssseg7e64_v_u64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e64_v_u64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e64_v_f64m1_m( @@ -40,7 +40,7 @@ void test_vssseg7e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg7e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vssseg7e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e64_v_i64m1_m( @@ -49,7 +49,7 @@ void 
test_vssseg7e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg7e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vssseg7e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e64_v_u64m1_m( @@ -58,6 +58,6 @@ void test_vssseg7e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg7e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vssseg7e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e8.c index 5d8eb722b267..bfda5d9f0617 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg7e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vssseg7e8_v_i8mf8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_i8mf8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vssseg7e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, 
vint8mf8_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vssseg7e8_v_i8mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_i8mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vssseg7e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vssseg7e8_v_i8mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_i8mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vssseg7e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vssseg7e8_v_i8m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_i8m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_u8mf8( @@ -48,7 +48,7 @@ void test_vssseg7e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vssseg7e8_v_u8mf8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_u8mf8(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_u8mf4( @@ -57,7 +57,7 @@ 
void test_vssseg7e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vssseg7e8_v_u8mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_u8mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_u8mf2( @@ -66,7 +66,7 @@ void test_vssseg7e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vssseg7e8_v_u8mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_u8mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_u8m1( @@ -75,7 +75,7 @@ void test_vssseg7e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vssseg7e8_v_u8m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_u8m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_i8mf8_m( @@ -84,7 +84,7 @@ void test_vssseg7e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vssseg7e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_i8mf8_m(mask, 
base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_i8mf4_m( @@ -93,7 +93,7 @@ void test_vssseg7e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vssseg7e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_i8mf2_m( @@ -102,7 +102,7 @@ void test_vssseg7e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vssseg7e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_i8m1_m( @@ -111,7 +111,7 @@ void test_vssseg7e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vssseg7e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_u8mf8_m( @@ -120,7 +120,7 @@ void test_vssseg7e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, 
vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vssseg7e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_u8mf4_m( @@ -129,7 +129,7 @@ void test_vssseg7e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vssseg7e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_u8mf2_m( @@ -138,7 +138,7 @@ void test_vssseg7e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vssseg7e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vssseg7e8_v_u8m1_m( @@ -147,6 +147,6 @@ void test_vssseg7e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg7e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vssseg7e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vssseg7e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e16.c index 6005e040e76f..3f3e13c75a58 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vssseg8e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vssseg8e16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vssseg8e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1( @@ -31,7 +31,7 @@ void test_vssseg8e16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vssseg8e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, 
vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_i16mf4( @@ -40,7 +40,7 @@ void test_vssseg8e16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vssseg8e16_v_i16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_i16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_i16mf2( @@ -49,7 +49,7 @@ void test_vssseg8e16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vssseg8e16_v_i16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_i16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_i16m1( @@ -58,7 +58,7 @@ void test_vssseg8e16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vssseg8e16_v_i16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_i16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_u16mf4( @@ -67,7 +67,7 @@ void test_vssseg8e16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, 
vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vssseg8e16_v_u16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_u16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_u16mf2( @@ -76,7 +76,7 @@ void test_vssseg8e16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vssseg8e16_v_u16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_u16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_u16m1( @@ -85,7 +85,7 @@ void test_vssseg8e16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vssseg8e16_v_u16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_u16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf4_m( @@ -94,7 +94,7 @@ void test_vssseg8e16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vssseg8e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: 
@test_vssseg8e16_v_f16mf2_m( @@ -103,7 +103,7 @@ void test_vssseg8e16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vssseg8e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1_m( @@ -112,7 +112,7 @@ void test_vssseg8e16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vssseg8e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_i16mf4_m( @@ -121,7 +121,7 @@ void test_vssseg8e16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vssseg8e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_i16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_i16mf2_m( @@ -130,7 +130,7 @@ void test_vssseg8e16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void 
test_vssseg8e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vssseg8e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_i16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_i16m1_m( @@ -139,7 +139,7 @@ void test_vssseg8e16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vssseg8e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_i16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_u16mf4_m( @@ -148,7 +148,7 @@ void test_vssseg8e16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vssseg8e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_u16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_u16mf2_m( @@ -157,7 +157,7 @@ void test_vssseg8e16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t 
vl) { - return vssseg8e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_u16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e16_v_u16m1_m( @@ -166,6 +166,6 @@ void test_vssseg8e16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg8e16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vssseg8e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e16_v_u16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e32.c index 56394eed2fdd..90cca9073047 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg8e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vssseg8e32_v_f32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e32_v_f32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e32_v_f32m1( @@ -22,7 +22,7 @@ void test_vssseg8e32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg8e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, 
vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vssseg8e32_v_f32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e32_v_f32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e32_v_i32mf2( @@ -31,7 +31,7 @@ void test_vssseg8e32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t v0, vf // CHECK-RV64-NEXT: ret void // void test_vssseg8e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vssseg8e32_v_i32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e32_v_i32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e32_v_i32m1( @@ -40,7 +40,7 @@ void test_vssseg8e32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg8e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vssseg8e32_v_i32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e32_v_i32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e32_v_u32mf2( @@ -49,7 +49,7 @@ void test_vssseg8e32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg8e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vssseg8e32_v_u32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e32_v_u32mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e32_v_u32m1( @@ -58,7 +58,7 @@ void 
test_vssseg8e32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vssseg8e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vssseg8e32_v_u32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e32_v_u32m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e32_v_f32mf2_m( @@ -67,7 +67,7 @@ void test_vssseg8e32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg8e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vssseg8e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e32_v_f32m1_m( @@ -76,7 +76,7 @@ void test_vssseg8e32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg8e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vssseg8e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e32_v_f32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e32_v_i32mf2_m( @@ -85,7 +85,7 @@ void test_vssseg8e32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg8e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, 
vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vssseg8e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e32_v_i32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e32_v_i32m1_m( @@ -94,7 +94,7 @@ void test_vssseg8e32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride // CHECK-RV64-NEXT: ret void // void test_vssseg8e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vssseg8e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e32_v_i32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e32_v_u32mf2_m( @@ -103,7 +103,7 @@ void test_vssseg8e32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg8e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vssseg8e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e32_v_u32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e32_v_u32m1_m( @@ -112,6 +112,6 @@ void test_vssseg8e32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstrid // CHECK-RV64-NEXT: ret void // void test_vssseg8e32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vssseg8e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return 
__riscv_vssseg8e32_v_u32m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e64.c index bf0939fccdff..5ff9b68bf55c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg8e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vssseg8e64_v_f64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e64_v_f64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e64_v_i64m1( @@ -22,7 +22,7 @@ void test_vssseg8e64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t v0, v // CHECK-RV64-NEXT: ret void // void test_vssseg8e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vssseg8e64_v_i64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e64_v_i64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e64_v_u64m1( @@ -31,7 +31,7 @@ void test_vssseg8e64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vssseg8e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vssseg8e64_v_u64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return 
__riscv_vssseg8e64_v_u64m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e64_v_f64m1_m( @@ -40,7 +40,7 @@ void test_vssseg8e64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vssseg8e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vssseg8e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e64_v_f64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e64_v_i64m1_m( @@ -49,7 +49,7 @@ void test_vssseg8e64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg8e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vssseg8e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e64_v_i64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e64_v_u64m1_m( @@ -58,6 +58,6 @@ void test_vssseg8e64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg8e64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vssseg8e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e64_v_u64m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e8.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e8.c index 4f20c6becb40..5467abf715ab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssseg8e8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vssseg8e8_v_i8mf8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_i8mf8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e8_v_i8mf4( @@ -21,7 +21,7 @@ void test_vssseg8e8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vssseg8e8_v_i8mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_i8mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e8_v_i8mf2( @@ -30,7 +30,7 @@ void test_vssseg8e8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vssseg8e8_v_i8mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_i8mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e8_v_i8m1( @@ -39,7 +39,7 @@ void test_vssseg8e8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint // CHECK-RV64-NEXT: ret void // 
void test_vssseg8e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vssseg8e8_v_i8m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_i8m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e8_v_u8mf8( @@ -48,7 +48,7 @@ void test_vssseg8e8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vssseg8e8_v_u8mf8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_u8mf8(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e8_v_u8mf4( @@ -57,7 +57,7 @@ void test_vssseg8e8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vssseg8e8_v_u8mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_u8mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e8_v_u8mf2( @@ -66,7 +66,7 @@ void test_vssseg8e8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vssseg8e8_v_u8mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_u8mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // 
CHECK-RV64-LABEL: @test_vssseg8e8_v_u8m1( @@ -75,7 +75,7 @@ void test_vssseg8e8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vssseg8e8_v_u8m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_u8m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e8_v_i8mf8_m( @@ -84,7 +84,7 @@ void test_vssseg8e8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuin // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vssseg8e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_i8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e8_v_i8mf4_m( @@ -93,7 +93,7 @@ void test_vssseg8e8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vssseg8e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_i8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e8_v_i8mf2_m( @@ -102,7 +102,7 @@ void test_vssseg8e8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t 
v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vssseg8e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_i8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e8_v_i8m1_m( @@ -111,7 +111,7 @@ void test_vssseg8e8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, v // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vssseg8e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_i8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e8_v_u8mf8_m( @@ -120,7 +120,7 @@ void test_vssseg8e8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vin // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vssseg8e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_u8mf8_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e8_v_u8mf4_m( @@ -129,7 +129,7 @@ void test_vssseg8e8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vssseg8e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_u8mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // 
CHECK-RV64-LABEL: @test_vssseg8e8_v_u8mf2_m( @@ -138,7 +138,7 @@ void test_vssseg8e8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vssseg8e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_u8mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vssseg8e8_v_u8m1_m( @@ -147,6 +147,6 @@ void test_vssseg8e8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-NEXT: ret void // void test_vssseg8e8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vssseg8e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vssseg8e8_v_u8m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssub.c index fbcfef0247ec..874b615134fe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssub.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vssub_vv_i8mf8(op1, op2, vl); + return __riscv_vssub_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf8(op1, op2, vl); + return __riscv_vssub_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vssub_vv_i8mf4(op1, op2, vl); + return __riscv_vssub_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf4(op1, op2, vl); + return __riscv_vssub_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vssub_vv_i8mf2(op1, op2, vl); + return __riscv_vssub_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf2(op1, op2, vl); + return __riscv_vssub_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vssub_vv_i8m1(op1, op2, vl); + return __riscv_vssub_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m1(op1, op2, vl); + return __riscv_vssub_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vssub_vv_i8m2(op1, op2, vl); + return __riscv_vssub_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m2(op1, op2, vl); + return __riscv_vssub_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vssub_vv_i8m4(op1, op2, vl); + return __riscv_vssub_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m4(op1, op2, vl); + return __riscv_vssub_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vssub_vv_i8m8(op1, op2, vl); + return __riscv_vssub_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t 
vl) { - return vssub_vx_i8m8(op1, op2, vl); + return __riscv_vssub_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vssub_vv_i16mf4(op1, op2, vl); + return __riscv_vssub_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf4(op1, op2, vl); + return __riscv_vssub_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vssub_vv_i16mf2(op1, op2, vl); + return __riscv_vssub_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf2(op1, op2, vl); + return __riscv_vssub_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vssub_vv_i16m1(op1, op2, vl); + return __riscv_vssub_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m1(op1, op2, vl); + return __riscv_vssub_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vssub_vv_i16m2(op1, op2, vl); + return __riscv_vssub_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m2(op1, op2, vl); + return __riscv_vssub_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vssub_vv_i16m4(op1, op2, vl); + return __riscv_vssub_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m4(op1, op2, vl); + return __riscv_vssub_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vssub_vv_i16m8(op1, op2, vl); + return __riscv_vssub_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m8(op1, op2, vl); + return __riscv_vssub_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vssub_vv_i32mf2(op1, op2, vl); + return __riscv_vssub_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32mf2(op1, op2, vl); + return __riscv_vssub_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vssub_vv_i32m1(op1, op2, vl); + return __riscv_vssub_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m1(op1, op2, vl); + return __riscv_vssub_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vssub_vv_i32m2(op1, op2, vl); + return __riscv_vssub_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, 
size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m2(op1, op2, vl); + return __riscv_vssub_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vssub_vv_i32m4(op1, op2, vl); + return __riscv_vssub_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m4(op1, op2, vl); + return __riscv_vssub_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vssub_vv_i32m8(op1, op2, vl); + return __riscv_vssub_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m8(op1, op2, vl); + return __riscv_vssub_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vssub_vv_i64m1(op1, op2, vl); + return __riscv_vssub_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, 
vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m1(op1, op2, vl); + return __riscv_vssub_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vssub_vv_i64m2(op1, op2, vl); + return __riscv_vssub_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m2(op1, op2, vl); + return __riscv_vssub_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vssub_vv_i64m4(op1, op2, vl); + return __riscv_vssub_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m4(op1, op2, vl); + return __riscv_vssub_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vssub_vv_i64m8(op1, op2, vl); + return __riscv_vssub_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t 
test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m8(op1, op2, vl); + return __riscv_vssub_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_m( @@ -408,7 +408,7 @@ vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vssub_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m( @@ -417,7 +417,7 @@ vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m( @@ -426,7 +426,7 @@ vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vssub_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m( @@ -435,7 +435,7 @@ vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m( @@ -444,7 +444,7 @@ vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, 
vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vssub_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m( @@ -453,7 +453,7 @@ vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m( @@ -462,7 +462,7 @@ vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vssub_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m( @@ -471,7 +471,7 @@ vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m( @@ -480,7 +480,7 @@ vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vssub_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m( @@ -489,7 +489,7 @@ vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8m2_m(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m( @@ -498,7 +498,7 @@ vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vssub_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m( @@ -507,7 +507,7 @@ vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m( @@ -516,7 +516,7 @@ vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vssub_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m( @@ -525,7 +525,7 @@ vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m( @@ -534,7 +534,7 @@ vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vssub_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m( @@ -543,7 +543,7 @@ vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, 
vint16mf4_t op1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m( @@ -552,7 +552,7 @@ vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vssub_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m( @@ -561,7 +561,7 @@ vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m( @@ -570,7 +570,7 @@ vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vssub_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m( @@ -579,7 +579,7 @@ vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m( @@ -588,7 +588,7 @@ vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vssub_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m( @@ -597,7 +597,7 @@ vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m( @@ -606,7 +606,7 @@ vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vssub_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m( @@ -615,7 +615,7 @@ vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m( @@ -624,7 +624,7 @@ vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vssub_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m( @@ -633,7 +633,7 @@ vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m8_m(mask, 
op1, op2, vl); + return __riscv_vssub_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m( @@ -642,7 +642,7 @@ vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vssub_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m( @@ -651,7 +651,7 @@ vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m( @@ -660,7 +660,7 @@ vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vssub_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m( @@ -669,7 +669,7 @@ vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m( @@ -678,7 +678,7 @@ vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vssub_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vssub_vx_i32m2_m( @@ -687,7 +687,7 @@ vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m( @@ -696,7 +696,7 @@ vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vssub_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m( @@ -705,7 +705,7 @@ vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m( @@ -714,7 +714,7 @@ vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vssub_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m( @@ -723,7 +723,7 @@ vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m( @@ -732,7 +732,7 @@ vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, 
int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vssub_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m( @@ -741,7 +741,7 @@ vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m( @@ -750,7 +750,7 @@ vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vssub_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m( @@ -759,7 +759,7 @@ vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m( @@ -768,7 +768,7 @@ vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vssub_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m( @@ -777,7 +777,7 @@ vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, 
vint64m4_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m( @@ -786,7 +786,7 @@ vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vssub_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vssub_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m( @@ -795,6 +795,6 @@ vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vssub_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssubu.c index 892aa65cb6e3..aae05d895981 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vssubu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vssubu_vv_u8mf8(op1, op2, vl); + return __riscv_vssubu_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf8(op1, op2, vl); + return __riscv_vssubu_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t 
op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vssubu_vv_u8mf4(op1, op2, vl); + return __riscv_vssubu_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4( @@ -39,7 +39,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf4(op1, op2, vl); + return __riscv_vssubu_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2( @@ -48,7 +48,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vssubu_vv_u8mf2(op1, op2, vl); + return __riscv_vssubu_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf2(op1, op2, vl); + return __riscv_vssubu_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vssubu_vv_u8m1(op1, op2, vl); + return __riscv_vssubu_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m1(op1, op2, vl); + return __riscv_vssubu_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2( @@ -84,7 +84,7 
@@ vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vssubu_vv_u8m2(op1, op2, vl); + return __riscv_vssubu_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m2(op1, op2, vl); + return __riscv_vssubu_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vssubu_vv_u8m4(op1, op2, vl); + return __riscv_vssubu_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4( @@ -111,7 +111,7 @@ vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m4(op1, op2, vl); + return __riscv_vssubu_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8( @@ -120,7 +120,7 @@ vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vssubu_vv_u8m8(op1, op2, vl); + return __riscv_vssubu_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8( @@ -129,7 +129,7 @@ vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m8(op1, op2, vl); + return __riscv_vssubu_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4( 
@@ -138,7 +138,7 @@ vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vssubu_vv_u16mf4(op1, op2, vl); + return __riscv_vssubu_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4( @@ -147,7 +147,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16mf4(op1, op2, vl); + return __riscv_vssubu_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2( @@ -156,7 +156,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vssubu_vv_u16mf2(op1, op2, vl); + return __riscv_vssubu_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2( @@ -165,7 +165,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16mf2(op1, op2, vl); + return __riscv_vssubu_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1( @@ -174,7 +174,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vssubu_vv_u16m1(op1, op2, vl); + return __riscv_vssubu_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1( @@ -183,7 +183,7 @@ vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return 
vssubu_vx_u16m1(op1, op2, vl); + return __riscv_vssubu_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2( @@ -192,7 +192,7 @@ vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vssubu_vv_u16m2(op1, op2, vl); + return __riscv_vssubu_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2( @@ -201,7 +201,7 @@ vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m2(op1, op2, vl); + return __riscv_vssubu_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4( @@ -210,7 +210,7 @@ vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vssubu_vv_u16m4(op1, op2, vl); + return __riscv_vssubu_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4( @@ -219,7 +219,7 @@ vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m4(op1, op2, vl); + return __riscv_vssubu_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8( @@ -228,7 +228,7 @@ vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vssubu_vv_u16m8(op1, op2, vl); + return __riscv_vssubu_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8( @@ -237,7 +237,7 @@ vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m8(op1, op2, vl); + return __riscv_vssubu_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2( @@ -246,7 +246,7 @@ vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vssubu_vv_u32mf2(op1, op2, vl); + return __riscv_vssubu_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32mf2(op1, op2, vl); + return __riscv_vssubu_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vssubu_vv_u32m1(op1, op2, vl); + return __riscv_vssubu_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m1(op1, op2, vl); + return __riscv_vssubu_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2( @@ -282,7 +282,7 @@ vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vssubu_vv_u32m2(op1, op2, vl); + return __riscv_vssubu_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2( @@ -291,7 +291,7 @@ vuint32m2_t 
test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m2(op1, op2, vl); + return __riscv_vssubu_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4( @@ -300,7 +300,7 @@ vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vssubu_vv_u32m4(op1, op2, vl); + return __riscv_vssubu_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4( @@ -309,7 +309,7 @@ vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m4(op1, op2, vl); + return __riscv_vssubu_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8( @@ -318,7 +318,7 @@ vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vssubu_vv_u32m8(op1, op2, vl); + return __riscv_vssubu_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8( @@ -327,7 +327,7 @@ vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m8(op1, op2, vl); + return __riscv_vssubu_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1( @@ -336,7 +336,7 @@ vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vssubu_vv_u64m1(op1, op2, vl); + return __riscv_vssubu_vv_u64m1(op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1( @@ -345,7 +345,7 @@ vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m1(op1, op2, vl); + return __riscv_vssubu_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2( @@ -354,7 +354,7 @@ vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vssubu_vv_u64m2(op1, op2, vl); + return __riscv_vssubu_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2( @@ -363,7 +363,7 @@ vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m2(op1, op2, vl); + return __riscv_vssubu_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4( @@ -372,7 +372,7 @@ vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vssubu_vv_u64m4(op1, op2, vl); + return __riscv_vssubu_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4( @@ -381,7 +381,7 @@ vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m4(op1, op2, vl); + return __riscv_vssubu_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8( @@ -390,7 +390,7 @@ vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { 
- return vssubu_vv_u64m8(op1, op2, vl); + return __riscv_vssubu_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8( @@ -399,7 +399,7 @@ vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m8(op1, op2, vl); + return __riscv_vssubu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m( @@ -408,7 +408,7 @@ vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vssubu_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m( @@ -417,7 +417,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m( @@ -426,7 +426,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vssubu_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m( @@ -435,7 +435,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vssubu_vv_u8mf2_m( @@ -444,7 +444,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vssubu_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m( @@ -453,7 +453,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m( @@ -462,7 +462,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vssubu_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m( @@ -471,7 +471,7 @@ vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m( @@ -480,7 +480,7 @@ vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vssubu_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m( @@ -489,7 +489,7 @@ vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, 
vuint8m2_t op1, vuint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m( @@ -498,7 +498,7 @@ vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vssubu_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m( @@ -507,7 +507,7 @@ vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m( @@ -516,7 +516,7 @@ vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vssubu_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m( @@ -525,7 +525,7 @@ vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m( @@ -534,7 +534,7 @@ vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vssubu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m( @@ -543,7 +543,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m( @@ -552,7 +552,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vssubu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m( @@ -561,7 +561,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m( @@ -570,7 +570,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vssubu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m( @@ -579,7 +579,7 @@ vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, 
vuint16m1_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m( @@ -588,7 +588,7 @@ vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vssubu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m( @@ -597,7 +597,7 @@ vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m( @@ -606,7 +606,7 @@ vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vssubu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m( @@ -615,7 +615,7 @@ vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m( @@ -624,7 +624,7 @@ vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return 
vssubu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m( @@ -633,7 +633,7 @@ vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m( @@ -642,7 +642,7 @@ vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vssubu_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m( @@ -651,7 +651,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m( @@ -660,7 +660,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vssubu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m( @@ -669,7 +669,7 @@ vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m1_m(mask, op1, op2, vl); + return 
__riscv_vssubu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m( @@ -678,7 +678,7 @@ vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vssubu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m( @@ -687,7 +687,7 @@ vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m( @@ -696,7 +696,7 @@ vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vssubu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m( @@ -705,7 +705,7 @@ vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m( @@ -714,7 +714,7 @@ vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vssubu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vssubu_vx_u32m8_m( @@ -723,7 +723,7 @@ vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m( @@ -732,7 +732,7 @@ vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vssubu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m( @@ -741,7 +741,7 @@ vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m( @@ -750,7 +750,7 @@ vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vssubu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m( @@ -759,7 +759,7 @@ vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m( @@ -768,7 +768,7 @@ vuint64m2_t 
test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vssubu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m( @@ -777,7 +777,7 @@ vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m( @@ -786,7 +786,7 @@ vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vssubu_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m( @@ -795,6 +795,6 @@ vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vssubu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsub.c index 8937d867eeea..8d62a25a6c10 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsub.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t 
vl) { - return vsub_vv_i8mf8(op1, op2, vl); + return __riscv_vsub_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf8(op1, op2, vl); + return __riscv_vsub_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsub_vv_i8mf4(op1, op2, vl); + return __riscv_vsub_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf4(op1, op2, vl); + return __riscv_vsub_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsub_vv_i8mf2(op1, op2, vl); + return __riscv_vsub_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf2(op1, op2, vl); + return __riscv_vsub_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsub_vv_i8m1(op1, 
op2, vl); + return __riscv_vsub_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m1(op1, op2, vl); + return __riscv_vsub_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsub_vv_i8m2(op1, op2, vl); + return __riscv_vsub_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m2(op1, op2, vl); + return __riscv_vsub_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsub_vv_i8m4(op1, op2, vl); + return __riscv_vsub_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m4(op1, op2, vl); + return __riscv_vsub_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsub_vv_i8m8(op1, op2, vl); + return __riscv_vsub_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsub_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m8(op1, op2, vl); + return __riscv_vsub_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsub_vv_i16mf4(op1, op2, vl); + return __riscv_vsub_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf4(op1, op2, vl); + return __riscv_vsub_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsub_vv_i16mf2(op1, op2, vl); + return __riscv_vsub_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf2(op1, op2, vl); + return __riscv_vsub_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsub_vv_i16m1(op1, op2, vl); + return __riscv_vsub_vv_i16m1(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsub_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m1(op1, op2, vl); + return __riscv_vsub_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsub_vv_i16m2(op1, op2, vl); + return __riscv_vsub_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m2(op1, op2, vl); + return __riscv_vsub_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsub_vv_i16m4(op1, op2, vl); + return __riscv_vsub_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m4(op1, op2, vl); + return __riscv_vsub_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsub_vv_i16m8(op1, op2, vl); + return __riscv_vsub_vv_i16m8(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsub_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m8(op1, op2, vl); + return __riscv_vsub_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsub_vv_i32mf2(op1, op2, vl); + return __riscv_vsub_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32mf2(op1, op2, vl); + return __riscv_vsub_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsub_vv_i32m1(op1, op2, vl); + return __riscv_vsub_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m1(op1, op2, vl); + return __riscv_vsub_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsub_vv_i32m2(op1, op2, vl); + return __riscv_vsub_vv_i32m2(op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m2(op1, op2, vl); + return __riscv_vsub_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsub_vv_i32m4(op1, op2, vl); + return __riscv_vsub_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m4(op1, op2, vl); + return __riscv_vsub_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsub_vv_i32m8(op1, op2, vl); + return __riscv_vsub_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m8(op1, op2, vl); + return __riscv_vsub_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsub_vv_i64m1(op1, op2, vl); + return __riscv_vsub_vv_i64m1(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsub_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m1(op1, op2, vl); + return __riscv_vsub_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsub_vv_i64m2(op1, op2, vl); + return __riscv_vsub_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m2(op1, op2, vl); + return __riscv_vsub_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsub_vv_i64m4(op1, op2, vl); + return __riscv_vsub_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m4(op1, op2, vl); + return __riscv_vsub_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsub_vv_i64m8(op1, op2, vl); + return __riscv_vsub_vv_i64m8(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsub_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m8(op1, op2, vl); + return __riscv_vsub_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8( @@ -408,7 +408,7 @@ vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsub_vv_u8mf8(op1, op2, vl); + return __riscv_vsub_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8( @@ -417,7 +417,7 @@ vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf8(op1, op2, vl); + return __riscv_vsub_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4( @@ -426,7 +426,7 @@ vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsub_vv_u8mf4(op1, op2, vl); + return __riscv_vsub_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4( @@ -435,7 +435,7 @@ vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf4(op1, op2, vl); + return __riscv_vsub_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2( @@ -444,7 +444,7 @@ vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsub_vv_u8mf2(op1, op2, vl); + return __riscv_vsub_vv_u8mf2(op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2( @@ -453,7 +453,7 @@ vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf2(op1, op2, vl); + return __riscv_vsub_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m1( @@ -462,7 +462,7 @@ vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsub_vv_u8m1(op1, op2, vl); + return __riscv_vsub_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m1( @@ -471,7 +471,7 @@ vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m1(op1, op2, vl); + return __riscv_vsub_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m2( @@ -480,7 +480,7 @@ vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsub_vv_u8m2(op1, op2, vl); + return __riscv_vsub_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m2( @@ -489,7 +489,7 @@ vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m2(op1, op2, vl); + return __riscv_vsub_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m4( @@ -498,7 +498,7 @@ vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsub_vv_u8m4(op1, op2, vl); + return __riscv_vsub_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsub_vx_u8m4( @@ -507,7 +507,7 @@ vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m4(op1, op2, vl); + return __riscv_vsub_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m8( @@ -516,7 +516,7 @@ vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsub_vv_u8m8(op1, op2, vl); + return __riscv_vsub_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m8( @@ -525,7 +525,7 @@ vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m8(op1, op2, vl); + return __riscv_vsub_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4( @@ -534,7 +534,7 @@ vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsub_vv_u16mf4(op1, op2, vl); + return __riscv_vsub_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4( @@ -543,7 +543,7 @@ vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf4(op1, op2, vl); + return __riscv_vsub_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2( @@ -552,7 +552,7 @@ vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsub_vv_u16mf2(op1, op2, vl); + return __riscv_vsub_vv_u16mf2(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsub_vx_u16mf2( @@ -561,7 +561,7 @@ vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf2(op1, op2, vl); + return __riscv_vsub_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m1( @@ -570,7 +570,7 @@ vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsub_vv_u16m1(op1, op2, vl); + return __riscv_vsub_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m1( @@ -579,7 +579,7 @@ vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m1(op1, op2, vl); + return __riscv_vsub_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m2( @@ -588,7 +588,7 @@ vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsub_vv_u16m2(op1, op2, vl); + return __riscv_vsub_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m2( @@ -597,7 +597,7 @@ vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m2(op1, op2, vl); + return __riscv_vsub_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m4( @@ -606,7 +606,7 @@ vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsub_vv_u16m4(op1, op2, vl); + return 
__riscv_vsub_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m4( @@ -615,7 +615,7 @@ vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m4(op1, op2, vl); + return __riscv_vsub_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m8( @@ -624,7 +624,7 @@ vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsub_vv_u16m8(op1, op2, vl); + return __riscv_vsub_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m8( @@ -633,7 +633,7 @@ vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m8(op1, op2, vl); + return __riscv_vsub_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2( @@ -642,7 +642,7 @@ vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsub_vv_u32mf2(op1, op2, vl); + return __riscv_vsub_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2( @@ -651,7 +651,7 @@ vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32mf2(op1, op2, vl); + return __riscv_vsub_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m1( @@ -660,7 +660,7 @@ vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - 
return vsub_vv_u32m1(op1, op2, vl); + return __riscv_vsub_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m1( @@ -669,7 +669,7 @@ vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m1(op1, op2, vl); + return __riscv_vsub_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m2( @@ -678,7 +678,7 @@ vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsub_vv_u32m2(op1, op2, vl); + return __riscv_vsub_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m2( @@ -687,7 +687,7 @@ vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m2(op1, op2, vl); + return __riscv_vsub_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m4( @@ -696,7 +696,7 @@ vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsub_vv_u32m4(op1, op2, vl); + return __riscv_vsub_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m4( @@ -705,7 +705,7 @@ vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m4(op1, op2, vl); + return __riscv_vsub_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m8( @@ -714,7 +714,7 @@ vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, 
vuint32m8_t op2, size_t vl) { - return vsub_vv_u32m8(op1, op2, vl); + return __riscv_vsub_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m8( @@ -723,7 +723,7 @@ vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m8(op1, op2, vl); + return __riscv_vsub_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m1( @@ -732,7 +732,7 @@ vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsub_vv_u64m1(op1, op2, vl); + return __riscv_vsub_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m1( @@ -741,7 +741,7 @@ vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m1(op1, op2, vl); + return __riscv_vsub_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m2( @@ -750,7 +750,7 @@ vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsub_vv_u64m2(op1, op2, vl); + return __riscv_vsub_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m2( @@ -759,7 +759,7 @@ vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m2(op1, op2, vl); + return __riscv_vsub_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m4( @@ -768,7 +768,7 @@ vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsub_vv_u64m4(op1, op2, vl); + return __riscv_vsub_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m4( @@ -777,7 +777,7 @@ vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m4(op1, op2, vl); + return __riscv_vsub_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m8( @@ -786,7 +786,7 @@ vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsub_vv_u64m8(op1, op2, vl); + return __riscv_vsub_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m8( @@ -795,7 +795,7 @@ vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m8(op1, op2, vl); + return __riscv_vsub_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m( @@ -804,7 +804,7 @@ vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsub_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m( @@ -813,7 +813,7 @@ vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m( @@ -822,7 +822,7 @@ vint8mf8_t 
test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsub_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m( @@ -831,7 +831,7 @@ vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m( @@ -840,7 +840,7 @@ vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsub_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m( @@ -849,7 +849,7 @@ vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m( @@ -858,7 +858,7 @@ vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsub_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m( @@ -867,7 +867,7 @@ vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m( @@ -876,7 +876,7 @@ vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsub_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m( @@ -885,7 +885,7 @@ vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m( @@ -894,7 +894,7 @@ vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsub_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m( @@ -903,7 +903,7 @@ vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m( @@ -912,7 +912,7 @@ vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsub_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i8m8_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m( @@ -921,7 +921,7 @@ vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m( @@ -930,7 +930,7 @@ vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsub_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m( @@ -939,7 +939,7 @@ vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m( @@ -948,7 +948,7 @@ vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsub_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m( @@ -957,7 +957,7 @@ vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m( @@ -966,7 +966,7 @@ vint16mf2_t 
test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsub_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m( @@ -975,7 +975,7 @@ vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m( @@ -984,7 +984,7 @@ vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsub_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m( @@ -993,7 +993,7 @@ vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m( @@ -1002,7 +1002,7 @@ vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsub_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m( @@ -1011,7 +1011,7 @@ vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m( @@ -1020,7 +1020,7 @@ vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsub_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m( @@ -1029,7 +1029,7 @@ vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m( @@ -1038,7 +1038,7 @@ vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsub_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return 
vsub_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m( @@ -1065,7 +1065,7 @@ vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m( @@ -1074,7 +1074,7 @@ vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsub_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m( @@ -1083,7 +1083,7 @@ vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m( @@ -1092,7 +1092,7 @@ vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsub_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m( @@ -1101,7 +1101,7 @@ vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsub_vv_i32m8_m( @@ -1110,7 +1110,7 @@ vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsub_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m( @@ -1119,7 +1119,7 @@ vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m( @@ -1128,7 +1128,7 @@ vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsub_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m( @@ -1137,7 +1137,7 @@ vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m( @@ -1146,7 +1146,7 @@ vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsub_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m( @@ -1155,7 +1155,7 @@ vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, 
vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m( @@ -1164,7 +1164,7 @@ vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsub_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m( @@ -1173,7 +1173,7 @@ vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m( @@ -1182,7 +1182,7 @@ vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsub_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vsub_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m( @@ -1191,7 +1191,7 @@ vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vsub_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m( @@ -1200,7 +1200,7 @@ vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, 
vuint8mf8_t op2, size_t vl) { - return vsub_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsub_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsub_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf2_m(mask, op1, op2, vl); + return 
__riscv_vsub_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsub_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsub_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsub_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m( @@ -1299,7 +1299,7 @@ vuint8m4_t 
test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsub_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsub_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsub_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsub_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsub_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) 
{ - return vsub_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsub_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsub_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsub_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u32mf2_m(mask, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsub_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsub_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m( @@ -1488,7 +1488,7 @@ vuint32m2_t 
test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsub_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsub_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsub_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsub_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsub_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return 
vsub_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vsub_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vsub_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei16.c index 995702b70ab8..56b39ca115cc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { - return vsuxei16_v_f16mf4(base, bindex, value, vl); + return __riscv_vsuxei16_v_f16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsuxei16_v_f16mf2(base, bindex, value, vl); + return __riscv_vsuxei16_v_f16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { - return vsuxei16_v_f16m1(base, bindex, value, vl); + return __riscv_vsuxei16_v_f16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2( @@ -40,7 
+40,7 @@ void test_vsuxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { - return vsuxei16_v_f16m2(base, bindex, value, vl); + return __riscv_vsuxei16_v_f16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4( @@ -49,7 +49,7 @@ void test_vsuxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { - return vsuxei16_v_f16m4(base, bindex, value, vl); + return __riscv_vsuxei16_v_f16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8( @@ -58,7 +58,7 @@ void test_vsuxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { - return vsuxei16_v_f16m8(base, bindex, value, vl); + return __riscv_vsuxei16_v_f16m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2( @@ -67,7 +67,7 @@ void test_vsuxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { - return vsuxei16_v_f32mf2(base, bindex, value, vl); + return __riscv_vsuxei16_v_f32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1( @@ -76,7 +76,7 @@ void test_vsuxei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { - return vsuxei16_v_f32m1(base, bindex, value, vl); + return __riscv_vsuxei16_v_f32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2( @@ -85,7 +85,7 @@ void test_vsuxei16_v_f32m1(float *base, vuint16mf2_t 
bindex, vfloat32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { - return vsuxei16_v_f32m2(base, bindex, value, vl); + return __riscv_vsuxei16_v_f32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4( @@ -94,7 +94,7 @@ void test_vsuxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { - return vsuxei16_v_f32m4(base, bindex, value, vl); + return __riscv_vsuxei16_v_f32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8( @@ -103,7 +103,7 @@ void test_vsuxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { - return vsuxei16_v_f32m8(base, bindex, value, vl); + return __riscv_vsuxei16_v_f32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1( @@ -112,7 +112,7 @@ void test_vsuxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { - return vsuxei16_v_f64m1(base, bindex, value, vl); + return __riscv_vsuxei16_v_f64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2( @@ -121,7 +121,7 @@ void test_vsuxei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { - return vsuxei16_v_f64m2(base, bindex, value, vl); + return __riscv_vsuxei16_v_f64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4( @@ -130,7 +130,7 @@ void test_vsuxei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t value // CHECK-RV64-NEXT: ret void // void 
test_vsuxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { - return vsuxei16_v_f64m4(base, bindex, value, vl); + return __riscv_vsuxei16_v_f64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8( @@ -139,7 +139,7 @@ void test_vsuxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { - return vsuxei16_v_f64m8(base, bindex, value, vl); + return __riscv_vsuxei16_v_f64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8( @@ -148,7 +148,7 @@ void test_vsuxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { - return vsuxei16_v_i8mf8(base, bindex, value, vl); + return __riscv_vsuxei16_v_i8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4( @@ -157,7 +157,7 @@ void test_vsuxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { - return vsuxei16_v_i8mf4(base, bindex, value, vl); + return __riscv_vsuxei16_v_i8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2( @@ -166,7 +166,7 @@ void test_vsuxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { - return vsuxei16_v_i8mf2(base, bindex, value, vl); + return __riscv_vsuxei16_v_i8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1( @@ -175,7 +175,7 @@ void test_vsuxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, 
size_t vl) { - return vsuxei16_v_i8m1(base, bindex, value, vl); + return __riscv_vsuxei16_v_i8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2( @@ -184,7 +184,7 @@ void test_vsuxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { - return vsuxei16_v_i8m2(base, bindex, value, vl); + return __riscv_vsuxei16_v_i8m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4( @@ -193,7 +193,7 @@ void test_vsuxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { - return vsuxei16_v_i8m4(base, bindex, value, vl); + return __riscv_vsuxei16_v_i8m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4( @@ -202,7 +202,7 @@ void test_vsuxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { - return vsuxei16_v_i16mf4(base, bindex, value, vl); + return __riscv_vsuxei16_v_i16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2( @@ -211,7 +211,7 @@ void test_vsuxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { - return vsuxei16_v_i16mf2(base, bindex, value, vl); + return __riscv_vsuxei16_v_i16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1( @@ -220,7 +220,7 @@ void test_vsuxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { - return vsuxei16_v_i16m1(base, bindex, value, vl); + return 
__riscv_vsuxei16_v_i16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2( @@ -229,7 +229,7 @@ void test_vsuxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { - return vsuxei16_v_i16m2(base, bindex, value, vl); + return __riscv_vsuxei16_v_i16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4( @@ -238,7 +238,7 @@ void test_vsuxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { - return vsuxei16_v_i16m4(base, bindex, value, vl); + return __riscv_vsuxei16_v_i16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8( @@ -247,7 +247,7 @@ void test_vsuxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { - return vsuxei16_v_i16m8(base, bindex, value, vl); + return __riscv_vsuxei16_v_i16m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2( @@ -256,7 +256,7 @@ void test_vsuxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { - return vsuxei16_v_i32mf2(base, bindex, value, vl); + return __riscv_vsuxei16_v_i32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1( @@ -265,7 +265,7 @@ void test_vsuxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { - return vsuxei16_v_i32m1(base, bindex, value, vl); + return __riscv_vsuxei16_v_i32m1(base, bindex, value, vl); } // 
CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2( @@ -274,7 +274,7 @@ void test_vsuxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { - return vsuxei16_v_i32m2(base, bindex, value, vl); + return __riscv_vsuxei16_v_i32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4( @@ -283,7 +283,7 @@ void test_vsuxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { - return vsuxei16_v_i32m4(base, bindex, value, vl); + return __riscv_vsuxei16_v_i32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8( @@ -292,7 +292,7 @@ void test_vsuxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { - return vsuxei16_v_i32m8(base, bindex, value, vl); + return __riscv_vsuxei16_v_i32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1( @@ -301,7 +301,7 @@ void test_vsuxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { - return vsuxei16_v_i64m1(base, bindex, value, vl); + return __riscv_vsuxei16_v_i64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2( @@ -310,7 +310,7 @@ void test_vsuxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { - return vsuxei16_v_i64m2(base, bindex, value, vl); + return __riscv_vsuxei16_v_i64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4( @@ -319,7 +319,7 @@ void 
test_vsuxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { - return vsuxei16_v_i64m4(base, bindex, value, vl); + return __riscv_vsuxei16_v_i64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8( @@ -328,7 +328,7 @@ void test_vsuxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { - return vsuxei16_v_i64m8(base, bindex, value, vl); + return __riscv_vsuxei16_v_i64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8( @@ -337,7 +337,7 @@ void test_vsuxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { - return vsuxei16_v_u8mf8(base, bindex, value, vl); + return __riscv_vsuxei16_v_u8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4( @@ -346,7 +346,7 @@ void test_vsuxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { - return vsuxei16_v_u8mf4(base, bindex, value, vl); + return __riscv_vsuxei16_v_u8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2( @@ -355,7 +355,7 @@ void test_vsuxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { - return vsuxei16_v_u8mf2(base, bindex, value, vl); + return __riscv_vsuxei16_v_u8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1( @@ -364,7 +364,7 @@ void test_vsuxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t 
value, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { - return vsuxei16_v_u8m1(base, bindex, value, vl); + return __riscv_vsuxei16_v_u8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2( @@ -373,7 +373,7 @@ void test_vsuxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { - return vsuxei16_v_u8m2(base, bindex, value, vl); + return __riscv_vsuxei16_v_u8m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4( @@ -382,7 +382,7 @@ void test_vsuxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { - return vsuxei16_v_u8m4(base, bindex, value, vl); + return __riscv_vsuxei16_v_u8m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4( @@ -391,7 +391,7 @@ void test_vsuxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { - return vsuxei16_v_u16mf4(base, bindex, value, vl); + return __riscv_vsuxei16_v_u16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2( @@ -400,7 +400,7 @@ void test_vsuxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t va // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { - return vsuxei16_v_u16mf2(base, bindex, value, vl); + return __riscv_vsuxei16_v_u16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1( @@ -409,7 +409,7 @@ void test_vsuxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t va // CHECK-RV64-NEXT: ret void // void 
test_vsuxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { - return vsuxei16_v_u16m1(base, bindex, value, vl); + return __riscv_vsuxei16_v_u16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2( @@ -418,7 +418,7 @@ void test_vsuxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { - return vsuxei16_v_u16m2(base, bindex, value, vl); + return __riscv_vsuxei16_v_u16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4( @@ -427,7 +427,7 @@ void test_vsuxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { - return vsuxei16_v_u16m4(base, bindex, value, vl); + return __riscv_vsuxei16_v_u16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8( @@ -436,7 +436,7 @@ void test_vsuxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { - return vsuxei16_v_u16m8(base, bindex, value, vl); + return __riscv_vsuxei16_v_u16m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2( @@ -445,7 +445,7 @@ void test_vsuxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { - return vsuxei16_v_u32mf2(base, bindex, value, vl); + return __riscv_vsuxei16_v_u32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1( @@ -454,7 +454,7 @@ void test_vsuxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t va // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, 
vuint32m1_t value, size_t vl) { - return vsuxei16_v_u32m1(base, bindex, value, vl); + return __riscv_vsuxei16_v_u32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2( @@ -463,7 +463,7 @@ void test_vsuxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { - return vsuxei16_v_u32m2(base, bindex, value, vl); + return __riscv_vsuxei16_v_u32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4( @@ -472,7 +472,7 @@ void test_vsuxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { - return vsuxei16_v_u32m4(base, bindex, value, vl); + return __riscv_vsuxei16_v_u32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8( @@ -481,7 +481,7 @@ void test_vsuxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { - return vsuxei16_v_u32m8(base, bindex, value, vl); + return __riscv_vsuxei16_v_u32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1( @@ -490,7 +490,7 @@ void test_vsuxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { - return vsuxei16_v_u64m1(base, bindex, value, vl); + return __riscv_vsuxei16_v_u64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2( @@ -499,7 +499,7 @@ void test_vsuxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { - return 
vsuxei16_v_u64m2(base, bindex, value, vl); + return __riscv_vsuxei16_v_u64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4( @@ -508,7 +508,7 @@ void test_vsuxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { - return vsuxei16_v_u64m4(base, bindex, value, vl); + return __riscv_vsuxei16_v_u64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8( @@ -517,7 +517,7 @@ void test_vsuxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { - return vsuxei16_v_u64m8(base, bindex, value, vl); + return __riscv_vsuxei16_v_u64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4_m( @@ -526,7 +526,7 @@ void test_vsuxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { - return vsuxei16_v_f16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2_m( @@ -535,7 +535,7 @@ void test_vsuxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsuxei16_v_f16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1_m( @@ -544,7 +544,7 @@ void test_vsuxei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, 
vfloat16m1_t value, size_t vl) { - return vsuxei16_v_f16m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2_m( @@ -553,7 +553,7 @@ void test_vsuxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { - return vsuxei16_v_f16m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4_m( @@ -562,7 +562,7 @@ void test_vsuxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { - return vsuxei16_v_f16m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8_m( @@ -571,7 +571,7 @@ void test_vsuxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { - return vsuxei16_v_f16m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f16m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2_m( @@ -580,7 +580,7 @@ void test_vsuxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { - return vsuxei16_v_f32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1_m( @@ -589,7 +589,7 @@ void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t 
bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { - return vsuxei16_v_f32m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2_m( @@ -598,7 +598,7 @@ void test_vsuxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { - return vsuxei16_v_f32m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4_m( @@ -607,7 +607,7 @@ void test_vsuxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { - return vsuxei16_v_f32m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8_m( @@ -616,7 +616,7 @@ void test_vsuxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { - return vsuxei16_v_f32m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1_m( @@ -625,7 +625,7 @@ void test_vsuxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { - return vsuxei16_v_f64m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: 
@test_vsuxei16_v_f64m2_m( @@ -634,7 +634,7 @@ void test_vsuxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { - return vsuxei16_v_f64m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4_m( @@ -643,7 +643,7 @@ void test_vsuxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { - return vsuxei16_v_f64m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8_m( @@ -652,7 +652,7 @@ void test_vsuxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { - return vsuxei16_v_f64m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_f64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8_m( @@ -661,7 +661,7 @@ void test_vsuxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { - return vsuxei16_v_i8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4_m( @@ -670,7 +670,7 @@ void test_vsuxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { - return vsuxei16_v_i8mf4_m(mask, base, 
bindex, value, vl); + return __riscv_vsuxei16_v_i8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2_m( @@ -679,7 +679,7 @@ void test_vsuxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { - return vsuxei16_v_i8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1_m( @@ -688,7 +688,7 @@ void test_vsuxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { - return vsuxei16_v_i8m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2_m( @@ -697,7 +697,7 @@ void test_vsuxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { - return vsuxei16_v_i8m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i8m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4_m( @@ -706,7 +706,7 @@ void test_vsuxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { - return vsuxei16_v_i8m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i8m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4_m( @@ -715,7 +715,7 @@ void test_vsuxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, 
vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { - return vsuxei16_v_i16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2_m( @@ -724,7 +724,7 @@ void test_vsuxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { - return vsuxei16_v_i16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1_m( @@ -733,7 +733,7 @@ void test_vsuxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { - return vsuxei16_v_i16m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2_m( @@ -742,7 +742,7 @@ void test_vsuxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { - return vsuxei16_v_i16m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4_m( @@ -751,7 +751,7 @@ void test_vsuxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { - return vsuxei16_v_i16m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8_m( @@ -760,7 +760,7 @@ void test_vsuxei16_v_i16m4_m(vbool4_t mask, int16_t *base, 
vuint16m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { - return vsuxei16_v_i16m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i16m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2_m( @@ -769,7 +769,7 @@ void test_vsuxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { - return vsuxei16_v_i32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1_m( @@ -778,7 +778,7 @@ void test_vsuxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { - return vsuxei16_v_i32m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2_m( @@ -787,7 +787,7 @@ void test_vsuxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { - return vsuxei16_v_i32m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4_m( @@ -796,7 +796,7 @@ void test_vsuxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { - return vsuxei16_v_i32m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i32m4_m(mask, base, bindex, value, vl); } // 
CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8_m( @@ -805,7 +805,7 @@ void test_vsuxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { - return vsuxei16_v_i32m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1_m( @@ -814,7 +814,7 @@ void test_vsuxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { - return vsuxei16_v_i64m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2_m( @@ -823,7 +823,7 @@ void test_vsuxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { - return vsuxei16_v_i64m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4_m( @@ -832,7 +832,7 @@ void test_vsuxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { - return vsuxei16_v_i64m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8_m( @@ -841,7 +841,7 @@ void test_vsuxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { - return 
vsuxei16_v_i64m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_i64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8_m( @@ -850,7 +850,7 @@ void test_vsuxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { - return vsuxei16_v_u8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4_m( @@ -859,7 +859,7 @@ void test_vsuxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { - return vsuxei16_v_u8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2_m( @@ -868,7 +868,7 @@ void test_vsuxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { - return vsuxei16_v_u8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1_m( @@ -877,7 +877,7 @@ void test_vsuxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { - return vsuxei16_v_u8m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2_m( @@ -886,7 +886,7 @@ void test_vsuxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vu // CHECK-RV64-NEXT: ret void // void 
test_vsuxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { - return vsuxei16_v_u8m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u8m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4_m( @@ -895,7 +895,7 @@ void test_vsuxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { - return vsuxei16_v_u8m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u8m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4_m( @@ -904,7 +904,7 @@ void test_vsuxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { - return vsuxei16_v_u16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2_m( @@ -913,7 +913,7 @@ void test_vsuxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { - return vsuxei16_v_u16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1_m( @@ -922,7 +922,7 @@ void test_vsuxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { - return vsuxei16_v_u16m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2_m( @@ -931,7 +931,7 @@ 
void test_vsuxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { - return vsuxei16_v_u16m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4_m( @@ -940,7 +940,7 @@ void test_vsuxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { - return vsuxei16_v_u16m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8_m( @@ -949,7 +949,7 @@ void test_vsuxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { - return vsuxei16_v_u16m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u16m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2_m( @@ -958,7 +958,7 @@ void test_vsuxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { - return vsuxei16_v_u32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1_m( @@ -967,7 +967,7 @@ void test_vsuxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { - return vsuxei16_v_u32m1_m(mask, base, bindex, value, vl); + return 
__riscv_vsuxei16_v_u32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2_m( @@ -976,7 +976,7 @@ void test_vsuxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { - return vsuxei16_v_u32m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4_m( @@ -985,7 +985,7 @@ void test_vsuxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { - return vsuxei16_v_u32m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8_m( @@ -994,7 +994,7 @@ void test_vsuxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { - return vsuxei16_v_u32m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1_m( @@ -1003,7 +1003,7 @@ void test_vsuxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { - return vsuxei16_v_u64m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2_m( @@ -1012,7 +1012,7 @@ void test_vsuxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, 
vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { - return vsuxei16_v_u64m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4_m( @@ -1021,7 +1021,7 @@ void test_vsuxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { - return vsuxei16_v_u64m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8_m( @@ -1030,6 +1030,6 @@ void test_vsuxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { - return vsuxei16_v_u64m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei16_v_u64m8_m(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei32.c index 18a0b5e1151f..43df0ce5f0a3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { - return vsuxei32_v_f16mf4(base, bindex, value, vl); + return __riscv_vsuxei32_v_f16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t 
vl) { - return vsuxei32_v_f16mf2(base, bindex, value, vl); + return __riscv_vsuxei32_v_f16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t va // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { - return vsuxei32_v_f16m1(base, bindex, value, vl); + return __riscv_vsuxei32_v_f16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { - return vsuxei32_v_f16m2(base, bindex, value, vl); + return __riscv_vsuxei32_v_f16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4( @@ -49,7 +49,7 @@ void test_vsuxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { - return vsuxei32_v_f16m4(base, bindex, value, vl); + return __riscv_vsuxei32_v_f16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2( @@ -58,7 +58,7 @@ void test_vsuxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { - return vsuxei32_v_f32mf2(base, bindex, value, vl); + return __riscv_vsuxei32_v_f32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1( @@ -67,7 +67,7 @@ void test_vsuxei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { - return vsuxei32_v_f32m1(base, bindex, value, vl); + return 
__riscv_vsuxei32_v_f32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2( @@ -76,7 +76,7 @@ void test_vsuxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { - return vsuxei32_v_f32m2(base, bindex, value, vl); + return __riscv_vsuxei32_v_f32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4( @@ -85,7 +85,7 @@ void test_vsuxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { - return vsuxei32_v_f32m4(base, bindex, value, vl); + return __riscv_vsuxei32_v_f32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8( @@ -94,7 +94,7 @@ void test_vsuxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { - return vsuxei32_v_f32m8(base, bindex, value, vl); + return __riscv_vsuxei32_v_f32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1( @@ -103,7 +103,7 @@ void test_vsuxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { - return vsuxei32_v_f64m1(base, bindex, value, vl); + return __riscv_vsuxei32_v_f64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2( @@ -112,7 +112,7 @@ void test_vsuxei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { - return vsuxei32_v_f64m2(base, bindex, value, vl); + return __riscv_vsuxei32_v_f64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: 
@test_vsuxei32_v_f64m4( @@ -121,7 +121,7 @@ void test_vsuxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { - return vsuxei32_v_f64m4(base, bindex, value, vl); + return __riscv_vsuxei32_v_f64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8( @@ -130,7 +130,7 @@ void test_vsuxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { - return vsuxei32_v_f64m8(base, bindex, value, vl); + return __riscv_vsuxei32_v_f64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8( @@ -139,7 +139,7 @@ void test_vsuxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { - return vsuxei32_v_i8mf8(base, bindex, value, vl); + return __riscv_vsuxei32_v_i8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4( @@ -148,7 +148,7 @@ void test_vsuxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { - return vsuxei32_v_i8mf4(base, bindex, value, vl); + return __riscv_vsuxei32_v_i8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2( @@ -157,7 +157,7 @@ void test_vsuxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { - return vsuxei32_v_i8mf2(base, bindex, value, vl); + return __riscv_vsuxei32_v_i8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1( @@ -166,7 +166,7 @@ void 
test_vsuxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { - return vsuxei32_v_i8m1(base, bindex, value, vl); + return __riscv_vsuxei32_v_i8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2( @@ -175,7 +175,7 @@ void test_vsuxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { - return vsuxei32_v_i8m2(base, bindex, value, vl); + return __riscv_vsuxei32_v_i8m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4( @@ -184,7 +184,7 @@ void test_vsuxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { - return vsuxei32_v_i16mf4(base, bindex, value, vl); + return __riscv_vsuxei32_v_i16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2( @@ -193,7 +193,7 @@ void test_vsuxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { - return vsuxei32_v_i16mf2(base, bindex, value, vl); + return __riscv_vsuxei32_v_i16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1( @@ -202,7 +202,7 @@ void test_vsuxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { - return vsuxei32_v_i16m1(base, bindex, value, vl); + return __riscv_vsuxei32_v_i16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2( @@ -211,7 +211,7 @@ void test_vsuxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, 
// CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { - return vsuxei32_v_i16m2(base, bindex, value, vl); + return __riscv_vsuxei32_v_i16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4( @@ -220,7 +220,7 @@ void test_vsuxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { - return vsuxei32_v_i16m4(base, bindex, value, vl); + return __riscv_vsuxei32_v_i16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2( @@ -229,7 +229,7 @@ void test_vsuxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { - return vsuxei32_v_i32mf2(base, bindex, value, vl); + return __riscv_vsuxei32_v_i32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1( @@ -238,7 +238,7 @@ void test_vsuxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { - return vsuxei32_v_i32m1(base, bindex, value, vl); + return __riscv_vsuxei32_v_i32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2( @@ -247,7 +247,7 @@ void test_vsuxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { - return vsuxei32_v_i32m2(base, bindex, value, vl); + return __riscv_vsuxei32_v_i32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4( @@ -256,7 +256,7 @@ void test_vsuxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m4(int32_t 
*base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { - return vsuxei32_v_i32m4(base, bindex, value, vl); + return __riscv_vsuxei32_v_i32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8( @@ -265,7 +265,7 @@ void test_vsuxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { - return vsuxei32_v_i32m8(base, bindex, value, vl); + return __riscv_vsuxei32_v_i32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1( @@ -274,7 +274,7 @@ void test_vsuxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { - return vsuxei32_v_i64m1(base, bindex, value, vl); + return __riscv_vsuxei32_v_i64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2( @@ -283,7 +283,7 @@ void test_vsuxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { - return vsuxei32_v_i64m2(base, bindex, value, vl); + return __riscv_vsuxei32_v_i64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4( @@ -292,7 +292,7 @@ void test_vsuxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { - return vsuxei32_v_i64m4(base, bindex, value, vl); + return __riscv_vsuxei32_v_i64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8( @@ -301,7 +301,7 @@ void test_vsuxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { - return 
vsuxei32_v_i64m8(base, bindex, value, vl); + return __riscv_vsuxei32_v_i64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8( @@ -310,7 +310,7 @@ void test_vsuxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { - return vsuxei32_v_u8mf8(base, bindex, value, vl); + return __riscv_vsuxei32_v_u8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4( @@ -319,7 +319,7 @@ void test_vsuxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { - return vsuxei32_v_u8mf4(base, bindex, value, vl); + return __riscv_vsuxei32_v_u8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2( @@ -328,7 +328,7 @@ void test_vsuxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { - return vsuxei32_v_u8mf2(base, bindex, value, vl); + return __riscv_vsuxei32_v_u8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1( @@ -337,7 +337,7 @@ void test_vsuxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { - return vsuxei32_v_u8m1(base, bindex, value, vl); + return __riscv_vsuxei32_v_u8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2( @@ -346,7 +346,7 @@ void test_vsuxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { - return vsuxei32_v_u8m2(base, bindex, value, vl); + return 
__riscv_vsuxei32_v_u8m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4( @@ -355,7 +355,7 @@ void test_vsuxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { - return vsuxei32_v_u16mf4(base, bindex, value, vl); + return __riscv_vsuxei32_v_u16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2( @@ -364,7 +364,7 @@ void test_vsuxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t va // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { - return vsuxei32_v_u16mf2(base, bindex, value, vl); + return __riscv_vsuxei32_v_u16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1( @@ -373,7 +373,7 @@ void test_vsuxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t val // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { - return vsuxei32_v_u16m1(base, bindex, value, vl); + return __riscv_vsuxei32_v_u16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2( @@ -382,7 +382,7 @@ void test_vsuxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { - return vsuxei32_v_u16m2(base, bindex, value, vl); + return __riscv_vsuxei32_v_u16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4( @@ -391,7 +391,7 @@ void test_vsuxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { - return vsuxei32_v_u16m4(base, bindex, value, vl); + return __riscv_vsuxei32_v_u16m4(base, bindex, value, vl); } 
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2( @@ -400,7 +400,7 @@ void test_vsuxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { - return vsuxei32_v_u32mf2(base, bindex, value, vl); + return __riscv_vsuxei32_v_u32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1( @@ -409,7 +409,7 @@ void test_vsuxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t va // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { - return vsuxei32_v_u32m1(base, bindex, value, vl); + return __riscv_vsuxei32_v_u32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2( @@ -418,7 +418,7 @@ void test_vsuxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { - return vsuxei32_v_u32m2(base, bindex, value, vl); + return __riscv_vsuxei32_v_u32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4( @@ -427,7 +427,7 @@ void test_vsuxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { - return vsuxei32_v_u32m4(base, bindex, value, vl); + return __riscv_vsuxei32_v_u32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8( @@ -436,7 +436,7 @@ void test_vsuxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { - return vsuxei32_v_u32m8(base, bindex, value, vl); + return __riscv_vsuxei32_v_u32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1( @@ -445,7 
+445,7 @@ void test_vsuxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { - return vsuxei32_v_u64m1(base, bindex, value, vl); + return __riscv_vsuxei32_v_u64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2( @@ -454,7 +454,7 @@ void test_vsuxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { - return vsuxei32_v_u64m2(base, bindex, value, vl); + return __riscv_vsuxei32_v_u64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4( @@ -463,7 +463,7 @@ void test_vsuxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { - return vsuxei32_v_u64m4(base, bindex, value, vl); + return __riscv_vsuxei32_v_u64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8( @@ -472,7 +472,7 @@ void test_vsuxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { - return vsuxei32_v_u64m8(base, bindex, value, vl); + return __riscv_vsuxei32_v_u64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4_m( @@ -481,7 +481,7 @@ void test_vsuxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { - return vsuxei32_v_f16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_f16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2_m( @@ -490,7 +490,7 @@ void 
test_vsuxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { - return vsuxei32_v_f16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_f16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1_m( @@ -499,7 +499,7 @@ void test_vsuxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { - return vsuxei32_v_f16m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_f16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2_m( @@ -508,7 +508,7 @@ void test_vsuxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { - return vsuxei32_v_f16m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_f16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4_m( @@ -517,7 +517,7 @@ void test_vsuxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { - return vsuxei32_v_f16m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_f16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2_m( @@ -526,7 +526,7 @@ void test_vsuxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { - return vsuxei32_v_f32mf2_m(mask, base, bindex, value, vl); + return 
__riscv_vsuxei32_v_f32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1_m( @@ -535,7 +535,7 @@ void test_vsuxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { - return vsuxei32_v_f32m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_f32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2_m( @@ -544,7 +544,7 @@ void test_vsuxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { - return vsuxei32_v_f32m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_f32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4_m( @@ -553,7 +553,7 @@ void test_vsuxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { - return vsuxei32_v_f32m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_f32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8_m( @@ -562,7 +562,7 @@ void test_vsuxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { - return vsuxei32_v_f32m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_f32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1_m( @@ -571,7 +571,7 @@ void test_vsuxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t 
bindex, vfloat64m1_t value, size_t vl) { - return vsuxei32_v_f64m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_f64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2_m( @@ -580,7 +580,7 @@ void test_vsuxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { - return vsuxei32_v_f64m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_f64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4_m( @@ -589,7 +589,7 @@ void test_vsuxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { - return vsuxei32_v_f64m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_f64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8_m( @@ -598,7 +598,7 @@ void test_vsuxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { - return vsuxei32_v_f64m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_f64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8_m( @@ -607,7 +607,7 @@ void test_vsuxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { - return vsuxei32_v_i8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4_m( @@ -616,7 +616,7 @@ void test_vsuxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t 
bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { - return vsuxei32_v_i8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2_m( @@ -625,7 +625,7 @@ void test_vsuxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { - return vsuxei32_v_i8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1_m( @@ -634,7 +634,7 @@ void test_vsuxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { - return vsuxei32_v_i8m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2_m( @@ -643,7 +643,7 @@ void test_vsuxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { - return vsuxei32_v_i8m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i8m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4_m( @@ -652,7 +652,7 @@ void test_vsuxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { - return vsuxei32_v_i16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: 
@test_vsuxei32_v_i16mf2_m( @@ -661,7 +661,7 @@ void test_vsuxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { - return vsuxei32_v_i16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1_m( @@ -670,7 +670,7 @@ void test_vsuxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { - return vsuxei32_v_i16m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2_m( @@ -679,7 +679,7 @@ void test_vsuxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { - return vsuxei32_v_i16m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4_m( @@ -688,7 +688,7 @@ void test_vsuxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { - return vsuxei32_v_i16m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2_m( @@ -697,7 +697,7 @@ void test_vsuxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { - return vsuxei32_v_i32mf2_m(mask, 
base, bindex, value, vl); + return __riscv_vsuxei32_v_i32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1_m( @@ -706,7 +706,7 @@ void test_vsuxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { - return vsuxei32_v_i32m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2_m( @@ -715,7 +715,7 @@ void test_vsuxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { - return vsuxei32_v_i32m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4_m( @@ -724,7 +724,7 @@ void test_vsuxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { - return vsuxei32_v_i32m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8_m( @@ -733,7 +733,7 @@ void test_vsuxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { - return vsuxei32_v_i32m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1_m( @@ -742,7 +742,7 @@ void test_vsuxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m1_m(vbool64_t 
mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { - return vsuxei32_v_i64m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2_m( @@ -751,7 +751,7 @@ void test_vsuxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { - return vsuxei32_v_i64m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4_m( @@ -760,7 +760,7 @@ void test_vsuxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { - return vsuxei32_v_i64m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8_m( @@ -769,7 +769,7 @@ void test_vsuxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { - return vsuxei32_v_i64m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_i64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8_m( @@ -778,7 +778,7 @@ void test_vsuxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { - return vsuxei32_v_u8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4_m( @@ -787,7 +787,7 @@ void test_vsuxei32_v_u8mf8_m(vbool64_t mask, 
uint8_t *base, vuint32mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { - return vsuxei32_v_u8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2_m( @@ -796,7 +796,7 @@ void test_vsuxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { - return vsuxei32_v_u8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1_m( @@ -805,7 +805,7 @@ void test_vsuxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { - return vsuxei32_v_u8m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2_m( @@ -814,7 +814,7 @@ void test_vsuxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { - return vsuxei32_v_u8m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u8m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4_m( @@ -823,7 +823,7 @@ void test_vsuxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { - return vsuxei32_v_u16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u16mf4_m(mask, base, bindex, value, vl); } // 
CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2_m( @@ -832,7 +832,7 @@ void test_vsuxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { - return vsuxei32_v_u16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1_m( @@ -841,7 +841,7 @@ void test_vsuxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { - return vsuxei32_v_u16m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2_m( @@ -850,7 +850,7 @@ void test_vsuxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { - return vsuxei32_v_u16m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4_m( @@ -859,7 +859,7 @@ void test_vsuxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { - return vsuxei32_v_u16m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2_m( @@ -868,7 +868,7 @@ void test_vsuxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { - return 
vsuxei32_v_u32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1_m( @@ -877,7 +877,7 @@ void test_vsuxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { - return vsuxei32_v_u32m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2_m( @@ -886,7 +886,7 @@ void test_vsuxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { - return vsuxei32_v_u32m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4_m( @@ -895,7 +895,7 @@ void test_vsuxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { - return vsuxei32_v_u32m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8_m( @@ -904,7 +904,7 @@ void test_vsuxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { - return vsuxei32_v_u32m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1_m( @@ -913,7 +913,7 @@ void test_vsuxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, // CHECK-RV64-NEXT: ret void // void 
test_vsuxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { - return vsuxei32_v_u64m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2_m( @@ -922,7 +922,7 @@ void test_vsuxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { - return vsuxei32_v_u64m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4_m( @@ -931,7 +931,7 @@ void test_vsuxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { - return vsuxei32_v_u64m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8_m( @@ -940,6 +940,6 @@ void test_vsuxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { - return vsuxei32_v_u64m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei32_v_u64m8_m(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei64.c index 5c371bc05686..68b8c467ff4d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void 
test_vsuxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { - return vsuxei64_v_f16mf4(base, bindex, value, vl); + return __riscv_vsuxei64_v_f16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t va // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsuxei64_v_f16mf2(base, bindex, value, vl); + return __riscv_vsuxei64_v_f16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t va // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { - return vsuxei64_v_f16m1(base, bindex, value, vl); + return __riscv_vsuxei64_v_f16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { - return vsuxei64_v_f16m2(base, bindex, value, vl); + return __riscv_vsuxei64_v_f16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsuxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { - return vsuxei64_v_f32mf2(base, bindex, value, vl); + return __riscv_vsuxei64_v_f32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1( @@ -58,7 +58,7 @@ void test_vsuxei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f32m1(float *base, vuint64m2_t bindex, 
vfloat32m1_t value, size_t vl) { - return vsuxei64_v_f32m1(base, bindex, value, vl); + return __riscv_vsuxei64_v_f32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2( @@ -67,7 +67,7 @@ void test_vsuxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { - return vsuxei64_v_f32m2(base, bindex, value, vl); + return __riscv_vsuxei64_v_f32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4( @@ -76,7 +76,7 @@ void test_vsuxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { - return vsuxei64_v_f32m4(base, bindex, value, vl); + return __riscv_vsuxei64_v_f32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1( @@ -85,7 +85,7 @@ void test_vsuxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { - return vsuxei64_v_f64m1(base, bindex, value, vl); + return __riscv_vsuxei64_v_f64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2( @@ -94,7 +94,7 @@ void test_vsuxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { - return vsuxei64_v_f64m2(base, bindex, value, vl); + return __riscv_vsuxei64_v_f64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4( @@ -103,7 +103,7 @@ void test_vsuxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { - return vsuxei64_v_f64m4(base, bindex, value, 
vl); + return __riscv_vsuxei64_v_f64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8( @@ -112,7 +112,7 @@ void test_vsuxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { - return vsuxei64_v_f64m8(base, bindex, value, vl); + return __riscv_vsuxei64_v_f64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8( @@ -121,7 +121,7 @@ void test_vsuxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { - return vsuxei64_v_i8mf8(base, bindex, value, vl); + return __riscv_vsuxei64_v_i8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4( @@ -130,7 +130,7 @@ void test_vsuxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { - return vsuxei64_v_i8mf4(base, bindex, value, vl); + return __riscv_vsuxei64_v_i8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2( @@ -139,7 +139,7 @@ void test_vsuxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { - return vsuxei64_v_i8mf2(base, bindex, value, vl); + return __riscv_vsuxei64_v_i8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1( @@ -148,7 +148,7 @@ void test_vsuxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { - return vsuxei64_v_i8m1(base, bindex, value, vl); + return __riscv_vsuxei64_v_i8m1(base, bindex, value, vl); } // 
CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4( @@ -157,7 +157,7 @@ void test_vsuxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { - return vsuxei64_v_i16mf4(base, bindex, value, vl); + return __riscv_vsuxei64_v_i16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2( @@ -166,7 +166,7 @@ void test_vsuxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { - return vsuxei64_v_i16mf2(base, bindex, value, vl); + return __riscv_vsuxei64_v_i16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1( @@ -175,7 +175,7 @@ void test_vsuxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { - return vsuxei64_v_i16m1(base, bindex, value, vl); + return __riscv_vsuxei64_v_i16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2( @@ -184,7 +184,7 @@ void test_vsuxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { - return vsuxei64_v_i16m2(base, bindex, value, vl); + return __riscv_vsuxei64_v_i16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2( @@ -193,7 +193,7 @@ void test_vsuxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { - return vsuxei64_v_i32mf2(base, bindex, value, vl); + return __riscv_vsuxei64_v_i32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1( @@ -202,7 +202,7 @@ 
void test_vsuxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { - return vsuxei64_v_i32m1(base, bindex, value, vl); + return __riscv_vsuxei64_v_i32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2( @@ -211,7 +211,7 @@ void test_vsuxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { - return vsuxei64_v_i32m2(base, bindex, value, vl); + return __riscv_vsuxei64_v_i32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4( @@ -220,7 +220,7 @@ void test_vsuxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { - return vsuxei64_v_i32m4(base, bindex, value, vl); + return __riscv_vsuxei64_v_i32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1( @@ -229,7 +229,7 @@ void test_vsuxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { - return vsuxei64_v_i64m1(base, bindex, value, vl); + return __riscv_vsuxei64_v_i64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2( @@ -238,7 +238,7 @@ void test_vsuxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { - return vsuxei64_v_i64m2(base, bindex, value, vl); + return __riscv_vsuxei64_v_i64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4( @@ -247,7 +247,7 @@ void test_vsuxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t 
value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { - return vsuxei64_v_i64m4(base, bindex, value, vl); + return __riscv_vsuxei64_v_i64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8( @@ -256,7 +256,7 @@ void test_vsuxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { - return vsuxei64_v_i64m8(base, bindex, value, vl); + return __riscv_vsuxei64_v_i64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8( @@ -265,7 +265,7 @@ void test_vsuxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { - return vsuxei64_v_u8mf8(base, bindex, value, vl); + return __riscv_vsuxei64_v_u8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4( @@ -274,7 +274,7 @@ void test_vsuxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { - return vsuxei64_v_u8mf4(base, bindex, value, vl); + return __riscv_vsuxei64_v_u8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2( @@ -283,7 +283,7 @@ void test_vsuxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { - return vsuxei64_v_u8mf2(base, bindex, value, vl); + return __riscv_vsuxei64_v_u8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1( @@ -292,7 +292,7 @@ void test_vsuxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8m1(uint8_t 
*base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { - return vsuxei64_v_u8m1(base, bindex, value, vl); + return __riscv_vsuxei64_v_u8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4( @@ -301,7 +301,7 @@ void test_vsuxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { - return vsuxei64_v_u16mf4(base, bindex, value, vl); + return __riscv_vsuxei64_v_u16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2( @@ -310,7 +310,7 @@ void test_vsuxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t val // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { - return vsuxei64_v_u16mf2(base, bindex, value, vl); + return __riscv_vsuxei64_v_u16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1( @@ -319,7 +319,7 @@ void test_vsuxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t val // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { - return vsuxei64_v_u16m1(base, bindex, value, vl); + return __riscv_vsuxei64_v_u16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2( @@ -328,7 +328,7 @@ void test_vsuxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { - return vsuxei64_v_u16m2(base, bindex, value, vl); + return __riscv_vsuxei64_v_u16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2( @@ -337,7 +337,7 @@ void test_vsuxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t 
vl) { - return vsuxei64_v_u32mf2(base, bindex, value, vl); + return __riscv_vsuxei64_v_u32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1( @@ -346,7 +346,7 @@ void test_vsuxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t val // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { - return vsuxei64_v_u32m1(base, bindex, value, vl); + return __riscv_vsuxei64_v_u32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2( @@ -355,7 +355,7 @@ void test_vsuxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { - return vsuxei64_v_u32m2(base, bindex, value, vl); + return __riscv_vsuxei64_v_u32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4( @@ -364,7 +364,7 @@ void test_vsuxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { - return vsuxei64_v_u32m4(base, bindex, value, vl); + return __riscv_vsuxei64_v_u32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1( @@ -373,7 +373,7 @@ void test_vsuxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { - return vsuxei64_v_u64m1(base, bindex, value, vl); + return __riscv_vsuxei64_v_u64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2( @@ -382,7 +382,7 @@ void test_vsuxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { - return vsuxei64_v_u64m2(base, bindex, value, vl); + 
return __riscv_vsuxei64_v_u64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4( @@ -391,7 +391,7 @@ void test_vsuxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { - return vsuxei64_v_u64m4(base, bindex, value, vl); + return __riscv_vsuxei64_v_u64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8( @@ -400,7 +400,7 @@ void test_vsuxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { - return vsuxei64_v_u64m8(base, bindex, value, vl); + return __riscv_vsuxei64_v_u64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4_m( @@ -409,7 +409,7 @@ void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { - return vsuxei64_v_f16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_f16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2_m( @@ -418,7 +418,7 @@ void test_vsuxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsuxei64_v_f16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_f16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1_m( @@ -427,7 +427,7 @@ void test_vsuxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { - return 
vsuxei64_v_f16m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_f16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2_m( @@ -436,7 +436,7 @@ void test_vsuxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { - return vsuxei64_v_f16m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_f16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2_m( @@ -445,7 +445,7 @@ void test_vsuxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { - return vsuxei64_v_f32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_f32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1_m( @@ -454,7 +454,7 @@ void test_vsuxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { - return vsuxei64_v_f32m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_f32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2_m( @@ -463,7 +463,7 @@ void test_vsuxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { - return vsuxei64_v_f32m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_f32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4_m( @@ -472,7 +472,7 @@ void test_vsuxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vf // CHECK-RV64-NEXT: ret void // void 
test_vsuxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { - return vsuxei64_v_f32m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_f32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1_m( @@ -481,7 +481,7 @@ void test_vsuxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { - return vsuxei64_v_f64m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_f64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2_m( @@ -490,7 +490,7 @@ void test_vsuxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { - return vsuxei64_v_f64m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_f64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4_m( @@ -499,7 +499,7 @@ void test_vsuxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { - return vsuxei64_v_f64m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_f64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8_m( @@ -508,7 +508,7 @@ void test_vsuxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { - return vsuxei64_v_f64m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_f64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8_m( @@ -517,7 +517,7 @@ void 
test_vsuxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { - return vsuxei64_v_i8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4_m( @@ -526,7 +526,7 @@ void test_vsuxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { - return vsuxei64_v_i8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2_m( @@ -535,7 +535,7 @@ void test_vsuxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { - return vsuxei64_v_i8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1_m( @@ -544,7 +544,7 @@ void test_vsuxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { - return vsuxei64_v_i8m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4_m( @@ -553,7 +553,7 @@ void test_vsuxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { - return vsuxei64_v_i16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i16mf4_m(mask, 
base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2_m( @@ -562,7 +562,7 @@ void test_vsuxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { - return vsuxei64_v_i16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1_m( @@ -571,7 +571,7 @@ void test_vsuxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { - return vsuxei64_v_i16m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2_m( @@ -580,7 +580,7 @@ void test_vsuxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { - return vsuxei64_v_i16m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2_m( @@ -589,7 +589,7 @@ void test_vsuxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { - return vsuxei64_v_i32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1_m( @@ -598,7 +598,7 @@ void test_vsuxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, 
size_t vl) { - return vsuxei64_v_i32m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2_m( @@ -607,7 +607,7 @@ void test_vsuxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { - return vsuxei64_v_i32m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4_m( @@ -616,7 +616,7 @@ void test_vsuxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { - return vsuxei64_v_i32m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1_m( @@ -625,7 +625,7 @@ void test_vsuxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { - return vsuxei64_v_i64m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2_m( @@ -634,7 +634,7 @@ void test_vsuxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { - return vsuxei64_v_i64m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4_m( @@ -643,7 +643,7 @@ void test_vsuxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, // CHECK-RV64-NEXT: ret void 
// void test_vsuxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { - return vsuxei64_v_i64m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8_m( @@ -652,7 +652,7 @@ void test_vsuxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { - return vsuxei64_v_i64m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_i64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8_m( @@ -661,7 +661,7 @@ void test_vsuxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { - return vsuxei64_v_u8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4_m( @@ -670,7 +670,7 @@ void test_vsuxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { - return vsuxei64_v_u8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2_m( @@ -679,7 +679,7 @@ void test_vsuxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { - return vsuxei64_v_u8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1_m( @@ -688,7 +688,7 @@ void 
test_vsuxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { - return vsuxei64_v_u8m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4_m( @@ -697,7 +697,7 @@ void test_vsuxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { - return vsuxei64_v_u16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2_m( @@ -706,7 +706,7 @@ void test_vsuxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { - return vsuxei64_v_u16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1_m( @@ -715,7 +715,7 @@ void test_vsuxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { - return vsuxei64_v_u16m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2_m( @@ -724,7 +724,7 @@ void test_vsuxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { - return vsuxei64_v_u16m2_m(mask, base, bindex, value, vl); + return 
__riscv_vsuxei64_v_u16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2_m( @@ -733,7 +733,7 @@ void test_vsuxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { - return vsuxei64_v_u32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1_m( @@ -742,7 +742,7 @@ void test_vsuxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { - return vsuxei64_v_u32m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2_m( @@ -751,7 +751,7 @@ void test_vsuxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { - return vsuxei64_v_u32m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4_m( @@ -760,7 +760,7 @@ void test_vsuxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { - return vsuxei64_v_u32m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1_m( @@ -769,7 +769,7 @@ void test_vsuxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, 
vuint64m1_t bindex, vuint64m1_t value, size_t vl) { - return vsuxei64_v_u64m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2_m( @@ -778,7 +778,7 @@ void test_vsuxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { - return vsuxei64_v_u64m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4_m( @@ -787,7 +787,7 @@ void test_vsuxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { - return vsuxei64_v_u64m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8_m( @@ -796,6 +796,6 @@ void test_vsuxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { - return vsuxei64_v_u64m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei64_v_u64m8_m(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei8.c index fb551c1e127a..44413640bc33 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, 
vfloat16mf4_t value, size_t vl) { - return vsuxei8_v_f16mf4(base, bindex, value, vl); + return __riscv_vsuxei8_v_f16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t val // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { - return vsuxei8_v_f16mf2(base, bindex, value, vl); + return __riscv_vsuxei8_v_f16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t val // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { - return vsuxei8_v_f16m1(base, bindex, value, vl); + return __riscv_vsuxei8_v_f16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { - return vsuxei8_v_f16m2(base, bindex, value, vl); + return __riscv_vsuxei8_v_f16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4( @@ -49,7 +49,7 @@ void test_vsuxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { - return vsuxei8_v_f16m4(base, bindex, value, vl); + return __riscv_vsuxei8_v_f16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8( @@ -58,7 +58,7 @@ void test_vsuxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { - return vsuxei8_v_f16m8(base, bindex, value, vl); 
+ return __riscv_vsuxei8_v_f16m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2( @@ -67,7 +67,7 @@ void test_vsuxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { - return vsuxei8_v_f32mf2(base, bindex, value, vl); + return __riscv_vsuxei8_v_f32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1( @@ -76,7 +76,7 @@ void test_vsuxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { - return vsuxei8_v_f32m1(base, bindex, value, vl); + return __riscv_vsuxei8_v_f32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2( @@ -85,7 +85,7 @@ void test_vsuxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { - return vsuxei8_v_f32m2(base, bindex, value, vl); + return __riscv_vsuxei8_v_f32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4( @@ -94,7 +94,7 @@ void test_vsuxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { - return vsuxei8_v_f32m4(base, bindex, value, vl); + return __riscv_vsuxei8_v_f32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8( @@ -103,7 +103,7 @@ void test_vsuxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, si // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { - return vsuxei8_v_f32m8(base, bindex, value, vl); + return __riscv_vsuxei8_v_f32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: 
@test_vsuxei8_v_f64m1( @@ -112,7 +112,7 @@ void test_vsuxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, si // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { - return vsuxei8_v_f64m1(base, bindex, value, vl); + return __riscv_vsuxei8_v_f64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2( @@ -121,7 +121,7 @@ void test_vsuxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { - return vsuxei8_v_f64m2(base, bindex, value, vl); + return __riscv_vsuxei8_v_f64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4( @@ -130,7 +130,7 @@ void test_vsuxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { - return vsuxei8_v_f64m4(base, bindex, value, vl); + return __riscv_vsuxei8_v_f64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8( @@ -139,7 +139,7 @@ void test_vsuxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { - return vsuxei8_v_f64m8(base, bindex, value, vl); + return __riscv_vsuxei8_v_f64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8( @@ -148,7 +148,7 @@ void test_vsuxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { - return vsuxei8_v_i8mf8(base, bindex, value, vl); + return __riscv_vsuxei8_v_i8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4( @@ -157,7 +157,7 @@ void test_vsuxei8_v_i8mf8(int8_t *base, vuint8mf8_t 
bindex, vint8mf8_t value, si // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { - return vsuxei8_v_i8mf4(base, bindex, value, vl); + return __riscv_vsuxei8_v_i8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2( @@ -166,7 +166,7 @@ void test_vsuxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, si // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { - return vsuxei8_v_i8mf2(base, bindex, value, vl); + return __riscv_vsuxei8_v_i8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1( @@ -175,7 +175,7 @@ void test_vsuxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, si // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { - return vsuxei8_v_i8m1(base, bindex, value, vl); + return __riscv_vsuxei8_v_i8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2( @@ -184,7 +184,7 @@ void test_vsuxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { - return vsuxei8_v_i8m2(base, bindex, value, vl); + return __riscv_vsuxei8_v_i8m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4( @@ -193,7 +193,7 @@ void test_vsuxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { - return vsuxei8_v_i8m4(base, bindex, value, vl); + return __riscv_vsuxei8_v_i8m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8( @@ -202,7 +202,7 @@ void test_vsuxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, 
vint8m8_t value, size_t vl) { - return vsuxei8_v_i8m8(base, bindex, value, vl); + return __riscv_vsuxei8_v_i8m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4( @@ -211,7 +211,7 @@ void test_vsuxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_ // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { - return vsuxei8_v_i16mf4(base, bindex, value, vl); + return __riscv_vsuxei8_v_i16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2( @@ -220,7 +220,7 @@ void test_vsuxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { - return vsuxei8_v_i16mf2(base, bindex, value, vl); + return __riscv_vsuxei8_v_i16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1( @@ -229,7 +229,7 @@ void test_vsuxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { - return vsuxei8_v_i16m1(base, bindex, value, vl); + return __riscv_vsuxei8_v_i16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2( @@ -238,7 +238,7 @@ void test_vsuxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { - return vsuxei8_v_i16m2(base, bindex, value, vl); + return __riscv_vsuxei8_v_i16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4( @@ -247,7 +247,7 @@ void test_vsuxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, si // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { - return vsuxei8_v_i16m4(base, bindex, value, vl); + 
return __riscv_vsuxei8_v_i16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8( @@ -256,7 +256,7 @@ void test_vsuxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, si // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { - return vsuxei8_v_i16m8(base, bindex, value, vl); + return __riscv_vsuxei8_v_i16m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2( @@ -265,7 +265,7 @@ void test_vsuxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, si // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { - return vsuxei8_v_i32mf2(base, bindex, value, vl); + return __riscv_vsuxei8_v_i32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1( @@ -274,7 +274,7 @@ void test_vsuxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { - return vsuxei8_v_i32m1(base, bindex, value, vl); + return __riscv_vsuxei8_v_i32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2( @@ -283,7 +283,7 @@ void test_vsuxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { - return vsuxei8_v_i32m2(base, bindex, value, vl); + return __riscv_vsuxei8_v_i32m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4( @@ -292,7 +292,7 @@ void test_vsuxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { - return vsuxei8_v_i32m4(base, bindex, value, vl); + return __riscv_vsuxei8_v_i32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: 
@test_vsuxei8_v_i32m8( @@ -301,7 +301,7 @@ void test_vsuxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, si // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { - return vsuxei8_v_i32m8(base, bindex, value, vl); + return __riscv_vsuxei8_v_i32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1( @@ -310,7 +310,7 @@ void test_vsuxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, si // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { - return vsuxei8_v_i64m1(base, bindex, value, vl); + return __riscv_vsuxei8_v_i64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2( @@ -319,7 +319,7 @@ void test_vsuxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { - return vsuxei8_v_i64m2(base, bindex, value, vl); + return __riscv_vsuxei8_v_i64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4( @@ -328,7 +328,7 @@ void test_vsuxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { - return vsuxei8_v_i64m4(base, bindex, value, vl); + return __riscv_vsuxei8_v_i64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8( @@ -337,7 +337,7 @@ void test_vsuxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, s // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { - return vsuxei8_v_i64m8(base, bindex, value, vl); + return __riscv_vsuxei8_v_i64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8( @@ -346,7 +346,7 @@ void test_vsuxei8_v_i64m8(int64_t *base, vuint8m1_t 
bindex, vint64m8_t value, si // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { - return vsuxei8_v_u8mf8(base, bindex, value, vl); + return __riscv_vsuxei8_v_u8mf8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4( @@ -355,7 +355,7 @@ void test_vsuxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { - return vsuxei8_v_u8mf4(base, bindex, value, vl); + return __riscv_vsuxei8_v_u8mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2( @@ -364,7 +364,7 @@ void test_vsuxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { - return vsuxei8_v_u8mf2(base, bindex, value, vl); + return __riscv_vsuxei8_v_u8mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1( @@ -373,7 +373,7 @@ void test_vsuxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { - return vsuxei8_v_u8m1(base, bindex, value, vl); + return __riscv_vsuxei8_v_u8m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2( @@ -382,7 +382,7 @@ void test_vsuxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { - return vsuxei8_v_u8m2(base, bindex, value, vl); + return __riscv_vsuxei8_v_u8m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4( @@ -391,7 +391,7 @@ void test_vsuxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m4(uint8_t *base, 
vuint8m4_t bindex, vuint8m4_t value, size_t vl) { - return vsuxei8_v_u8m4(base, bindex, value, vl); + return __riscv_vsuxei8_v_u8m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8( @@ -400,7 +400,7 @@ void test_vsuxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { - return vsuxei8_v_u8m8(base, bindex, value, vl); + return __riscv_vsuxei8_v_u8m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4( @@ -409,7 +409,7 @@ void test_vsuxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, siz // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { - return vsuxei8_v_u16mf4(base, bindex, value, vl); + return __riscv_vsuxei8_v_u16mf4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2( @@ -418,7 +418,7 @@ void test_vsuxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { - return vsuxei8_v_u16mf2(base, bindex, value, vl); + return __riscv_vsuxei8_v_u16mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1( @@ -427,7 +427,7 @@ void test_vsuxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { - return vsuxei8_v_u16m1(base, bindex, value, vl); + return __riscv_vsuxei8_v_u16m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2( @@ -436,7 +436,7 @@ void test_vsuxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { - return vsuxei8_v_u16m2(base, 
bindex, value, vl); + return __riscv_vsuxei8_v_u16m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4( @@ -445,7 +445,7 @@ void test_vsuxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { - return vsuxei8_v_u16m4(base, bindex, value, vl); + return __riscv_vsuxei8_v_u16m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8( @@ -454,7 +454,7 @@ void test_vsuxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { - return vsuxei8_v_u16m8(base, bindex, value, vl); + return __riscv_vsuxei8_v_u16m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2( @@ -463,7 +463,7 @@ void test_vsuxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { - return vsuxei8_v_u32mf2(base, bindex, value, vl); + return __riscv_vsuxei8_v_u32mf2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1( @@ -472,7 +472,7 @@ void test_vsuxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t valu // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { - return vsuxei8_v_u32m1(base, bindex, value, vl); + return __riscv_vsuxei8_v_u32m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2( @@ -481,7 +481,7 @@ void test_vsuxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { - return vsuxei8_v_u32m2(base, bindex, value, vl); + return __riscv_vsuxei8_v_u32m2(base, bindex, value, vl); } // 
CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4( @@ -490,7 +490,7 @@ void test_vsuxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { - return vsuxei8_v_u32m4(base, bindex, value, vl); + return __riscv_vsuxei8_v_u32m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8( @@ -499,7 +499,7 @@ void test_vsuxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { - return vsuxei8_v_u32m8(base, bindex, value, vl); + return __riscv_vsuxei8_v_u32m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1( @@ -508,7 +508,7 @@ void test_vsuxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { - return vsuxei8_v_u64m1(base, bindex, value, vl); + return __riscv_vsuxei8_v_u64m1(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2( @@ -517,7 +517,7 @@ void test_vsuxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { - return vsuxei8_v_u64m2(base, bindex, value, vl); + return __riscv_vsuxei8_v_u64m2(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4( @@ -526,7 +526,7 @@ void test_vsuxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { - return vsuxei8_v_u64m4(base, bindex, value, vl); + return __riscv_vsuxei8_v_u64m4(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8( @@ -535,7 +535,7 @@ void 
test_vsuxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { - return vsuxei8_v_u64m8(base, bindex, value, vl); + return __riscv_vsuxei8_v_u64m8(base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4_m( @@ -544,7 +544,7 @@ void test_vsuxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { - return vsuxei8_v_f16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2_m( @@ -553,7 +553,7 @@ void test_vsuxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { - return vsuxei8_v_f16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1_m( @@ -562,7 +562,7 @@ void test_vsuxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { - return vsuxei8_v_f16m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2_m( @@ -571,7 +571,7 @@ void test_vsuxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { - return vsuxei8_v_f16m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f16m2_m(mask, base, bindex, value, vl); } 
// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4_m( @@ -580,7 +580,7 @@ void test_vsuxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { - return vsuxei8_v_f16m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8_m( @@ -589,7 +589,7 @@ void test_vsuxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { - return vsuxei8_v_f16m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f16m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2_m( @@ -598,7 +598,7 @@ void test_vsuxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { - return vsuxei8_v_f32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1_m( @@ -607,7 +607,7 @@ void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { - return vsuxei8_v_f32m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2_m( @@ -616,7 +616,7 @@ void test_vsuxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { - return vsuxei8_v_f32m2_m(mask, 
base, bindex, value, vl); + return __riscv_vsuxei8_v_f32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4_m( @@ -625,7 +625,7 @@ void test_vsuxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfl // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { - return vsuxei8_v_f32m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8_m( @@ -634,7 +634,7 @@ void test_vsuxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloa // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { - return vsuxei8_v_f32m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1_m( @@ -643,7 +643,7 @@ void test_vsuxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, vfloa // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { - return vsuxei8_v_f64m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2_m( @@ -652,7 +652,7 @@ void test_vsuxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { - return vsuxei8_v_f64m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4_m( @@ -661,7 +661,7 @@ void test_vsuxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f64m4_m(vbool16_t mask, double *base, 
vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { - return vsuxei8_v_f64m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8_m( @@ -670,7 +670,7 @@ void test_vsuxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vf // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { - return vsuxei8_v_f64m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_f64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8_m( @@ -679,7 +679,7 @@ void test_vsuxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, vflo // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { - return vsuxei8_v_i8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4_m( @@ -688,7 +688,7 @@ void test_vsuxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vi // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { - return vsuxei8_v_i8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2_m( @@ -697,7 +697,7 @@ void test_vsuxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vi // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { - return vsuxei8_v_i8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1_m( @@ -706,7 +706,7 @@ void test_vsuxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vi // 
CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { - return vsuxei8_v_i8m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2_m( @@ -715,7 +715,7 @@ void test_vsuxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8 // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { - return vsuxei8_v_i8m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i8m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4_m( @@ -724,7 +724,7 @@ void test_vsuxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8 // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { - return vsuxei8_v_i8m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i8m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8_m( @@ -733,7 +733,7 @@ void test_vsuxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8 // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { - return vsuxei8_v_i8m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i8m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4_m( @@ -742,7 +742,7 @@ void test_vsuxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8 // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { - return vsuxei8_v_i16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2_m( @@ -751,7 +751,7 @@ void 
test_vsuxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { - return vsuxei8_v_i16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1_m( @@ -760,7 +760,7 @@ void test_vsuxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { - return vsuxei8_v_i16m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i16m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2_m( @@ -769,7 +769,7 @@ void test_vsuxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { - return vsuxei8_v_i16m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4_m( @@ -778,7 +778,7 @@ void test_vsuxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { - return vsuxei8_v_i16m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8_m( @@ -787,7 +787,7 @@ void test_vsuxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { - return vsuxei8_v_i16m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i16m8_m(mask, base, bindex, 
value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2_m( @@ -796,7 +796,7 @@ void test_vsuxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { - return vsuxei8_v_i32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1_m( @@ -805,7 +805,7 @@ void test_vsuxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { - return vsuxei8_v_i32m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2_m( @@ -814,7 +814,7 @@ void test_vsuxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { - return vsuxei8_v_i32m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4_m( @@ -823,7 +823,7 @@ void test_vsuxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { - return vsuxei8_v_i32m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8_m( @@ -832,7 +832,7 @@ void test_vsuxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { - return 
vsuxei8_v_i32m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1_m( @@ -841,7 +841,7 @@ void test_vsuxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vin // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { - return vsuxei8_v_i64m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2_m( @@ -850,7 +850,7 @@ void test_vsuxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { - return vsuxei8_v_i64m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4_m( @@ -859,7 +859,7 @@ void test_vsuxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { - return vsuxei8_v_i64m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8_m( @@ -868,7 +868,7 @@ void test_vsuxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { - return vsuxei8_v_i64m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_i64m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8_m( @@ -877,7 +877,7 @@ void test_vsuxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vin // CHECK-RV64-NEXT: ret void // void 
test_vsuxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { - return vsuxei8_v_u8mf8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u8mf8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4_m( @@ -886,7 +886,7 @@ void test_vsuxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { - return vsuxei8_v_u8mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u8mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2_m( @@ -895,7 +895,7 @@ void test_vsuxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { - return vsuxei8_v_u8mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u8mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1_m( @@ -904,7 +904,7 @@ void test_vsuxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { - return vsuxei8_v_u8m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u8m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2_m( @@ -913,7 +913,7 @@ void test_vsuxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuin // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { - return vsuxei8_v_u8m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u8m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4_m( @@ -922,7 +922,7 @@ void test_vsuxei8_v_u8m2_m(vbool4_t mask, 
uint8_t *base, vuint8m2_t bindex, vuin // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { - return vsuxei8_v_u8m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u8m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8_m( @@ -931,7 +931,7 @@ void test_vsuxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuin // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { - return vsuxei8_v_u8m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u8m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4_m( @@ -940,7 +940,7 @@ void test_vsuxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuin // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { - return vsuxei8_v_u16mf4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u16mf4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2_m( @@ -949,7 +949,7 @@ void test_vsuxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { - return vsuxei8_v_u16mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u16mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1_m( @@ -958,7 +958,7 @@ void test_vsuxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { - return vsuxei8_v_u16m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u16m1_m(mask, base, bindex, value, vl); } // 
CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2_m( @@ -967,7 +967,7 @@ void test_vsuxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { - return vsuxei8_v_u16m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u16m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4_m( @@ -976,7 +976,7 @@ void test_vsuxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { - return vsuxei8_v_u16m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u16m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8_m( @@ -985,7 +985,7 @@ void test_vsuxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { - return vsuxei8_v_u16m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u16m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2_m( @@ -994,7 +994,7 @@ void test_vsuxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { - return vsuxei8_v_u32mf2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u32mf2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1_m( @@ -1003,7 +1003,7 @@ void test_vsuxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { - return vsuxei8_v_u32m1_m(mask, 
base, bindex, value, vl); + return __riscv_vsuxei8_v_u32m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2_m( @@ -1012,7 +1012,7 @@ void test_vsuxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { - return vsuxei8_v_u32m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u32m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4_m( @@ -1021,7 +1021,7 @@ void test_vsuxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { - return vsuxei8_v_u32m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u32m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8_m( @@ -1030,7 +1030,7 @@ void test_vsuxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { - return vsuxei8_v_u32m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u32m8_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1_m( @@ -1039,7 +1039,7 @@ void test_vsuxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vu // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { - return vsuxei8_v_u64m1_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u64m1_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2_m( @@ -1048,7 +1048,7 @@ void test_vsuxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u64m2_m(vbool32_t mask, 
uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { - return vsuxei8_v_u64m2_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u64m2_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4_m( @@ -1057,7 +1057,7 @@ void test_vsuxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { - return vsuxei8_v_u64m4_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u64m4_m(mask, base, bindex, value, vl); } // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8_m( @@ -1066,6 +1066,6 @@ void test_vsuxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { - return vsuxei8_v_u64m8_m(mask, base, bindex, value, vl); + return __riscv_vsuxei8_v_u64m8_m(mask, base, bindex, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c index 407dd569881d..b5e7fb81fe66 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf2(_Float16 
*base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg2ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei16_v_f16m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei16_v_f16m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4( @@ -49,7 +49,7 @@ void test_vsuxseg2ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsuxseg2ei16_v_f16m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2( @@ -58,7 +58,7 @@ void test_vsuxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_f32mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1( @@ -67,7 +67,7 @@ void test_vsuxseg2ei16_v_f32mf2(float *base, 
vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei16_v_f32m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2( @@ -76,7 +76,7 @@ void test_vsuxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei16_v_f32m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4( @@ -85,7 +85,7 @@ void test_vsuxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei16_v_f32m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1( @@ -94,7 +94,7 @@ void test_vsuxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2( @@ -103,7 +103,7 @@ void test_vsuxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg2ei16_v_f64m4( @@ -112,7 +112,7 @@ void test_vsuxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8( @@ -121,7 +121,7 @@ void test_vsuxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei16_v_i8mf8(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4( @@ -130,7 +130,7 @@ void test_vsuxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_i8mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2( @@ -139,7 +139,7 @@ void test_vsuxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_i8mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1( @@ -148,7 +148,7 @@ void test_vsuxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei16_v_i8m1(base, bindex, v0, v1, vl); + return 
__riscv_vsuxseg2ei16_v_i8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2( @@ -157,7 +157,7 @@ void test_vsuxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsuxseg2ei16_v_i8m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i8m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4( @@ -166,7 +166,7 @@ void test_vsuxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsuxseg2ei16_v_i8m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i8m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4( @@ -175,7 +175,7 @@ void test_vsuxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_i16mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2( @@ -184,7 +184,7 @@ void test_vsuxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_i16mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1( @@ -193,7 +193,7 @@ void test_vsuxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t 
vl) { - return vsuxseg2ei16_v_i16m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2( @@ -202,7 +202,7 @@ void test_vsuxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei16_v_i16m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4( @@ -211,7 +211,7 @@ void test_vsuxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei16_v_i16m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_i32mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1( @@ -229,7 +229,7 @@ void test_vsuxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei16_v_i32m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2( @@ -238,7 +238,7 @@ void test_vsuxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei16_v_i32m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4( @@ -247,7 +247,7 @@ void test_vsuxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei16_v_i32m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1( @@ -256,7 +256,7 @@ void test_vsuxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsuxseg2ei16_v_i64m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2( @@ -265,7 +265,7 @@ void test_vsuxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei16_v_i64m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4( @@ -274,7 +274,7 @@ void test_vsuxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei16_v_i64m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8( @@ -283,7 +283,7 @@ void test_vsuxseg2ei16_v_i64m4(int64_t 
*base, vuint16m1_t bindex, vint64m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei16_v_u8mf8(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4( @@ -292,7 +292,7 @@ void test_vsuxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_u8mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2( @@ -301,7 +301,7 @@ void test_vsuxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u8mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m1( @@ -310,7 +310,7 @@ void test_vsuxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u8m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2( @@ -319,7 +319,7 @@ void test_vsuxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u8m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u8m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg2ei16_v_u8m4( @@ -328,7 +328,7 @@ void test_vsuxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsuxseg2ei16_v_u8m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u8m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4( @@ -337,7 +337,7 @@ void test_vsuxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_u16mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2( @@ -346,7 +346,7 @@ void test_vsuxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u16mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1( @@ -355,7 +355,7 @@ void test_vsuxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u16m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2( @@ -364,7 +364,7 @@ void test_vsuxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u16m2(base, bindex, v0, 
v1, vl); + return __riscv_vsuxseg2ei16_v_u16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4( @@ -373,7 +373,7 @@ void test_vsuxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei16_v_u16m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2( @@ -382,7 +382,7 @@ void test_vsuxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u32mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1( @@ -391,7 +391,7 @@ void test_vsuxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u32m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2( @@ -400,7 +400,7 @@ void test_vsuxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u32m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4( @@ -409,7 +409,7 @@ void test_vsuxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t 
bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei16_v_u32m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1( @@ -418,7 +418,7 @@ void test_vsuxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u64m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2( @@ -427,7 +427,7 @@ void test_vsuxseg2ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u64m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4( @@ -436,7 +436,7 @@ void test_vsuxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei16_v_u64m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4_m( @@ -445,7 +445,7 @@ void test_vsuxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2_m( @@ -454,7 +454,7 @@ void 
test_vsuxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2_m( @@ -472,7 +472,7 @@ void test_vsuxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4_m( @@ -481,7 +481,7 @@ void test_vsuxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsuxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2_m( @@ -490,7 +490,7 @@ void test_vsuxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, 
vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1_m( @@ -499,7 +499,7 @@ void test_vsuxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2_m( @@ -508,7 +508,7 @@ void test_vsuxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4_m( @@ -517,7 +517,7 @@ void test_vsuxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1_m( @@ -526,7 +526,7 @@ void test_vsuxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f64m1_m(mask, 
base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2_m( @@ -535,7 +535,7 @@ void test_vsuxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4_m( @@ -544,7 +544,7 @@ void test_vsuxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8_m( @@ -553,7 +553,7 @@ void test_vsuxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4_m( @@ -562,7 +562,7 @@ void test_vsuxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2_m( @@ -571,7 +571,7 @@ void test_vsuxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1_m( @@ -580,7 +580,7 @@ void test_vsuxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2_m( @@ -589,7 +589,7 @@ void test_vsuxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsuxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4_m( @@ -598,7 +598,7 @@ void test_vsuxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsuxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4_m( @@ -607,7 +607,7 @@ void test_vsuxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, 
v1, vl); + return __riscv_vsuxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2_m( @@ -616,7 +616,7 @@ void test_vsuxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1_m( @@ -625,7 +625,7 @@ void test_vsuxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2_m( @@ -634,7 +634,7 @@ void test_vsuxseg2ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4_m( @@ -643,7 +643,7 @@ void test_vsuxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2_m( @@ -652,7 +652,7 @@ void 
test_vsuxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1_m( @@ -661,7 +661,7 @@ void test_vsuxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2_m( @@ -670,7 +670,7 @@ void test_vsuxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4_m( @@ -679,7 +679,7 @@ void test_vsuxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1_m( @@ -688,7 +688,7 @@ void test_vsuxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t 
bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsuxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2_m( @@ -697,7 +697,7 @@ void test_vsuxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4_m( @@ -706,7 +706,7 @@ void test_vsuxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8_m( @@ -715,7 +715,7 @@ void test_vsuxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4_m( @@ -724,7 +724,7 @@ void test_vsuxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl); 
} // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2_m( @@ -733,7 +733,7 @@ void test_vsuxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m1_m( @@ -742,7 +742,7 @@ void test_vsuxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2_m( @@ -751,7 +751,7 @@ void test_vsuxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m4_m( @@ -760,7 +760,7 @@ void test_vsuxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsuxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4_m( @@ -769,7 +769,7 @@ void test_vsuxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2_m( @@ -778,7 +778,7 @@ void test_vsuxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1_m( @@ -787,7 +787,7 @@ void test_vsuxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2_m( @@ -796,7 +796,7 @@ void test_vsuxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4_m( @@ -805,7 +805,7 @@ void test_vsuxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei16_v_u16m4_m(mask, base, 
bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2_m( @@ -814,7 +814,7 @@ void test_vsuxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1_m( @@ -823,7 +823,7 @@ void test_vsuxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2_m( @@ -832,7 +832,7 @@ void test_vsuxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4_m( @@ -841,7 +841,7 @@ void test_vsuxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1_m( @@ -850,7 +850,7 @@ void 
test_vsuxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2_m( @@ -859,7 +859,7 @@ void test_vsuxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4_m( @@ -868,6 +868,6 @@ void test_vsuxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c index ec5a5bcb9748..198b919a0877 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl); + return 
__riscv_vsuxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei32_v_f16m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei32_v_f16m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4( @@ -49,7 +49,7 @@ void test_vsuxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f16m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2( @@ -58,7 +58,7 @@ void test_vsuxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, 
vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_f32mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1( @@ -67,7 +67,7 @@ void test_vsuxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei32_v_f32m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2( @@ -76,7 +76,7 @@ void test_vsuxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei32_v_f32m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4( @@ -85,7 +85,7 @@ void test_vsuxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f32m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1( @@ -94,7 +94,7 @@ void test_vsuxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2( @@ -103,7 +103,7 @@ void test_vsuxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // 
void test_vsuxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4( @@ -112,7 +112,7 @@ void test_vsuxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8( @@ -121,7 +121,7 @@ void test_vsuxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei32_v_i8mf8(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4( @@ -130,7 +130,7 @@ void test_vsuxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_i8mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2( @@ -139,7 +139,7 @@ void test_vsuxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_i8mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1( @@ -148,7 +148,7 @@ void 
test_vsuxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i8m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2( @@ -157,7 +157,7 @@ void test_vsuxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i8m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i8m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4( @@ -166,7 +166,7 @@ void test_vsuxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_i16mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2( @@ -175,7 +175,7 @@ void test_vsuxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_i16mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1( @@ -184,7 +184,7 @@ void test_vsuxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i16m1(base, bindex, v0, v1, vl); } 
// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m2( @@ -193,7 +193,7 @@ void test_vsuxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4( @@ -202,7 +202,7 @@ void test_vsuxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2( @@ -211,7 +211,7 @@ void test_vsuxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_i32mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1( @@ -220,7 +220,7 @@ void test_vsuxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i32m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2( @@ -229,7 +229,7 @@ void test_vsuxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i32m2(base, 
bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4( @@ -238,7 +238,7 @@ void test_vsuxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei32_v_i32m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1( @@ -247,7 +247,7 @@ void test_vsuxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i64m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2( @@ -256,7 +256,7 @@ void test_vsuxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i64m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m4( @@ -265,7 +265,7 @@ void test_vsuxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei32_v_i64m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf8( @@ -274,7 +274,7 @@ void test_vsuxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, 
vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei32_v_u8mf8(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4( @@ -283,7 +283,7 @@ void test_vsuxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_u8mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2( @@ -292,7 +292,7 @@ void test_vsuxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_u8mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1( @@ -301,7 +301,7 @@ void test_vsuxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u8m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2( @@ -310,7 +310,7 @@ void test_vsuxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u8m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u8m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4( @@ -319,7 +319,7 @@ void test_vsuxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, // CHECK-RV64-NEXT: ret 
void // void test_vsuxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_u16mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2( @@ -328,7 +328,7 @@ void test_vsuxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_u16mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1( @@ -337,7 +337,7 @@ void test_vsuxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u16m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2( @@ -346,7 +346,7 @@ void test_vsuxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u16m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4( @@ -355,7 +355,7 @@ void test_vsuxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei32_v_u16m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2( @@ -364,7 +364,7 @@ 
void test_vsuxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_u32mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1( @@ -373,7 +373,7 @@ void test_vsuxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u32m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2( @@ -382,7 +382,7 @@ void test_vsuxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u32m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m4( @@ -391,7 +391,7 @@ void test_vsuxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei32_v_u32m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1( @@ -400,7 +400,7 @@ void test_vsuxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u64m1(base, bindex, v0, v1, vl); + return 
__riscv_vsuxseg2ei32_v_u64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2( @@ -409,7 +409,7 @@ void test_vsuxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u64m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4( @@ -418,7 +418,7 @@ void test_vsuxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei32_v_u64m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void 
// void test_vsuxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2_m( @@ -454,7 +454,7 @@ void test_vsuxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4_m( @@ -463,7 +463,7 @@ void test_vsuxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2_m( @@ -472,7 +472,7 @@ void test_vsuxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1_m( @@ -481,7 +481,7 @@ void test_vsuxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei32_v_f32m1_m(mask, 
base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2_m( @@ -490,7 +490,7 @@ void test_vsuxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4_m( @@ -499,7 +499,7 @@ void test_vsuxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1_m( @@ -508,7 +508,7 @@ void test_vsuxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2_m( @@ -517,7 +517,7 @@ void test_vsuxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4_m( @@ -526,7 +526,7 @@ void 
test_vsuxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8_m( @@ -535,7 +535,7 @@ void test_vsuxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4_m( @@ -544,7 +544,7 @@ void test_vsuxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2_m( @@ -553,7 +553,7 @@ void test_vsuxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1_m( @@ -562,7 +562,7 @@ void test_vsuxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, 
vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2_m( @@ -571,7 +571,7 @@ void test_vsuxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4_m( @@ -580,7 +580,7 @@ void test_vsuxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2_m( @@ -589,7 +589,7 @@ void test_vsuxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1_m( @@ -598,7 +598,7 @@ void test_vsuxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl); } // 
CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m2_m( @@ -607,7 +607,7 @@ void test_vsuxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4_m( @@ -616,7 +616,7 @@ void test_vsuxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2_m( @@ -625,7 +625,7 @@ void test_vsuxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1_m( @@ -634,7 +634,7 @@ void test_vsuxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2_m( @@ -643,7 +643,7 @@ void test_vsuxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4_m( @@ -652,7 +652,7 @@ void test_vsuxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1_m( @@ -661,7 +661,7 @@ void test_vsuxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2_m( @@ -670,7 +670,7 @@ void test_vsuxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m4_m( @@ -679,7 +679,7 @@ void test_vsuxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl); + return 
__riscv_vsuxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf8_m( @@ -688,7 +688,7 @@ void test_vsuxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4_m( @@ -697,7 +697,7 @@ void test_vsuxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2_m( @@ -706,7 +706,7 @@ void test_vsuxseg2ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1_m( @@ -715,7 +715,7 @@ void test_vsuxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2_m( @@ -724,7 +724,7 @@ void test_vsuxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t 
*base, vuint32m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4_m( @@ -733,7 +733,7 @@ void test_vsuxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2_m( @@ -742,7 +742,7 @@ void test_vsuxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1_m( @@ -751,7 +751,7 @@ void test_vsuxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2_m( @@ -760,7 +760,7 @@ void test_vsuxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, 
size_t vl) { - return vsuxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4_m( @@ -769,7 +769,7 @@ void test_vsuxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2_m( @@ -778,7 +778,7 @@ void test_vsuxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1_m( @@ -787,7 +787,7 @@ void test_vsuxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2_m( @@ -796,7 +796,7 @@ void test_vsuxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg2ei32_v_u32m4_m( @@ -805,7 +805,7 @@ void test_vsuxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1_m( @@ -814,7 +814,7 @@ void test_vsuxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2_m( @@ -823,7 +823,7 @@ void test_vsuxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4_m( @@ -832,6 +832,6 @@ void test_vsuxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c 
index 155ce7c6d6fb..8e8d79254536 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg2ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f16m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f16m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsuxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_f32mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1( @@ -58,7 +58,7 @@ void test_vsuxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2( @@ -67,7 +67,7 @@ void test_vsuxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4( @@ -76,7 +76,7 @@ void test_vsuxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1( @@ -85,7 +85,7 @@ void test_vsuxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2( @@ -94,7 +94,7 @@ void 
test_vsuxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4( @@ -103,7 +103,7 @@ void test_vsuxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8( @@ -112,7 +112,7 @@ void test_vsuxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf8(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4( @@ -121,7 +121,7 @@ void test_vsuxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2( @@ -130,7 +130,7 @@ void test_vsuxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i8mf2(base, bindex, v0, v1, vl); 
} // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1( @@ -139,7 +139,7 @@ void test_vsuxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i8m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf4( @@ -148,7 +148,7 @@ void test_vsuxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_i16mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2( @@ -157,7 +157,7 @@ void test_vsuxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_i16mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1( @@ -166,7 +166,7 @@ void test_vsuxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i16m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2( @@ -175,7 +175,7 @@ void test_vsuxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_i16m2(base, 
bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32mf2( @@ -184,7 +184,7 @@ void test_vsuxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_i32mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1( @@ -193,7 +193,7 @@ void test_vsuxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i32m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2( @@ -202,7 +202,7 @@ void test_vsuxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei64_v_i32m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4( @@ -211,7 +211,7 @@ void test_vsuxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei64_v_i32m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1( @@ -220,7 +220,7 @@ void test_vsuxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t 
bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i64m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2( @@ -229,7 +229,7 @@ void test_vsuxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei64_v_i64m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4( @@ -238,7 +238,7 @@ void test_vsuxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei64_v_i64m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8( @@ -247,7 +247,7 @@ void test_vsuxseg2ei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf8(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4( @@ -256,7 +256,7 @@ void test_vsuxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2( @@ -265,7 +265,7 @@ void test_vsuxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1( @@ -274,7 +274,7 @@ void test_vsuxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u8m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4( @@ -283,7 +283,7 @@ void test_vsuxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_u16mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2( @@ -292,7 +292,7 @@ void test_vsuxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_u16mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m1( @@ -301,7 +301,7 @@ void test_vsuxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u16m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m2( @@ -310,7 
+310,7 @@ void test_vsuxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_u16m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32mf2( @@ -319,7 +319,7 @@ void test_vsuxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_u32mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1( @@ -328,7 +328,7 @@ void test_vsuxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u32m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2( @@ -337,7 +337,7 @@ void test_vsuxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei64_v_u32m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4( @@ -346,7 +346,7 @@ void test_vsuxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei64_v_u32m4(base, bindex, v0, v1, vl); + return 
__riscv_vsuxseg2ei64_v_u32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m1( @@ -355,7 +355,7 @@ void test_vsuxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u64m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2( @@ -364,7 +364,7 @@ void test_vsuxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei64_v_u64m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4( @@ -373,7 +373,7 @@ void test_vsuxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei64_v_u64m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4_m( @@ -382,7 +382,7 @@ void test_vsuxseg2ei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2_m( @@ -391,7 +391,7 @@ void test_vsuxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_f16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1_m( @@ -400,7 +400,7 @@ void test_vsuxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2_m( @@ -409,7 +409,7 @@ void test_vsuxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2_m( @@ -418,7 +418,7 @@ void test_vsuxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1_m( @@ -427,7 +427,7 @@ void test_vsuxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m1_m(mask, base, 
bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2_m( @@ -436,7 +436,7 @@ void test_vsuxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4_m( @@ -445,7 +445,7 @@ void test_vsuxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1_m( @@ -454,7 +454,7 @@ void test_vsuxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2_m( @@ -463,7 +463,7 @@ void test_vsuxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4_m( @@ -472,7 +472,7 @@ void 
test_vsuxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8_m( @@ -481,7 +481,7 @@ void test_vsuxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4_m( @@ -490,7 +490,7 @@ void test_vsuxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2_m( @@ -499,7 +499,7 @@ void test_vsuxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1_m( @@ -508,7 +508,7 @@ void test_vsuxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, 
vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf4_m( @@ -517,7 +517,7 @@ void test_vsuxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2_m( @@ -526,7 +526,7 @@ void test_vsuxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1_m( @@ -535,7 +535,7 @@ void test_vsuxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2_m( @@ -544,7 +544,7 @@ void test_vsuxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl); } // 
CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32mf2_m( @@ -553,7 +553,7 @@ void test_vsuxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1_m( @@ -562,7 +562,7 @@ void test_vsuxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2_m( @@ -571,7 +571,7 @@ void test_vsuxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4_m( @@ -580,7 +580,7 @@ void test_vsuxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1_m( @@ -589,7 +589,7 @@ void test_vsuxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t binde // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2_m( @@ -598,7 +598,7 @@ void test_vsuxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4_m( @@ -607,7 +607,7 @@ void test_vsuxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8_m( @@ -616,7 +616,7 @@ void test_vsuxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4_m( @@ -625,7 +625,7 @@ void test_vsuxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl); + 
return __riscv_vsuxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2_m( @@ -634,7 +634,7 @@ void test_vsuxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1_m( @@ -643,7 +643,7 @@ void test_vsuxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4_m( @@ -652,7 +652,7 @@ void test_vsuxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2_m( @@ -661,7 +661,7 @@ void test_vsuxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m1_m( @@ -670,7 +670,7 @@ void 
test_vsuxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m2_m( @@ -679,7 +679,7 @@ void test_vsuxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32mf2_m( @@ -688,7 +688,7 @@ void test_vsuxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1_m( @@ -697,7 +697,7 @@ void test_vsuxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2_m( @@ -706,7 +706,7 @@ void test_vsuxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, 
vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4_m( @@ -715,7 +715,7 @@ void test_vsuxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m1_m( @@ -724,7 +724,7 @@ void test_vsuxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2_m( @@ -733,7 +733,7 @@ void test_vsuxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4_m( @@ -742,6 +742,6 @@ void test_vsuxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei64_v_u64m4_m(mask, base, 
bindex, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c index 03f0889e2103..bed79a7de1e4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei8_v_f16m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei8_v_f16m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f16m2(base, bindex, v0, v1, 
vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4( @@ -49,7 +49,7 @@ void test_vsuxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f16m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2( @@ -58,7 +58,7 @@ void test_vsuxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_f32mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1( @@ -67,7 +67,7 @@ void test_vsuxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei8_v_f32m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2( @@ -76,7 +76,7 @@ void test_vsuxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei8_v_f32m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4( @@ -85,7 +85,7 @@ void test_vsuxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f32m4(base, bindex, v0, v1, 
vl); + return __riscv_vsuxseg2ei8_v_f32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1( @@ -94,7 +94,7 @@ void test_vsuxseg2ei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2( @@ -103,7 +103,7 @@ void test_vsuxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4( @@ -112,7 +112,7 @@ void test_vsuxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8( @@ -121,7 +121,7 @@ void test_vsuxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei8_v_i8mf8(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4( @@ -130,7 +130,7 @@ void test_vsuxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, 
size_t vl) { - return vsuxseg2ei8_v_i8mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2( @@ -139,7 +139,7 @@ void test_vsuxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_i8mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1( @@ -148,7 +148,7 @@ void test_vsuxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei8_v_i8m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2( @@ -157,7 +157,7 @@ void test_vsuxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i8m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i8m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4( @@ -166,7 +166,7 @@ void test_vsuxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i8m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i8m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4( @@ -175,7 +175,7 @@ void test_vsuxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t 
bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_i16mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf2( @@ -184,7 +184,7 @@ void test_vsuxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_i16mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i16mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1( @@ -193,7 +193,7 @@ void test_vsuxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei8_v_i16m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2( @@ -202,7 +202,7 @@ void test_vsuxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i16m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4( @@ -211,7 +211,7 @@ void test_vsuxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i16m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, v // CHECK-RV64-NEXT: ret 
void // void test_vsuxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_i32mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1( @@ -229,7 +229,7 @@ void test_vsuxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei8_v_i32m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2( @@ -238,7 +238,7 @@ void test_vsuxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i32m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4( @@ -247,7 +247,7 @@ void test_vsuxseg2ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i32m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1( @@ -256,7 +256,7 @@ void test_vsuxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsuxseg2ei8_v_i64m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2( @@ -265,7 +265,7 @@ void test_vsuxseg2ei8_v_i64m1(int64_t *base, 
vuint8mf8_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i64m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4( @@ -274,7 +274,7 @@ void test_vsuxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i64m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8( @@ -283,7 +283,7 @@ void test_vsuxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei8_v_u8mf8(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u8mf8(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4( @@ -292,7 +292,7 @@ void test_vsuxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_u8mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u8mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2( @@ -301,7 +301,7 @@ void test_vsuxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_u8mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u8mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1( @@ 
-310,7 +310,7 @@ void test_vsuxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u8m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u8m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2( @@ -319,7 +319,7 @@ void test_vsuxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u8m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u8m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4( @@ -328,7 +328,7 @@ void test_vsuxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u8m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u8m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4( @@ -337,7 +337,7 @@ void test_vsuxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_u16mf4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u16mf4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2( @@ -346,7 +346,7 @@ void test_vsuxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_u16mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u16mf2(base, bindex, v0, v1, 
vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1( @@ -355,7 +355,7 @@ void test_vsuxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u16m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u16m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2( @@ -364,7 +364,7 @@ void test_vsuxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u16m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u16m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4( @@ -373,7 +373,7 @@ void test_vsuxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u16m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u16m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2( @@ -382,7 +382,7 @@ void test_vsuxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_u32mf2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u32mf2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m1( @@ -391,7 +391,7 @@ void test_vsuxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m1(base, bindex, 
v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u32m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2( @@ -400,7 +400,7 @@ void test_vsuxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u32m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4( @@ -409,7 +409,7 @@ void test_vsuxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u32m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1( @@ -418,7 +418,7 @@ void test_vsuxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u64m1(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u64m1(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2( @@ -427,7 +427,7 @@ void test_vsuxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u64m2(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u64m2(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4( @@ -436,7 +436,7 @@ void test_vsuxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, 
vuint64m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u64m4(base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u64m4(base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4_m( @@ -445,7 +445,7 @@ void test_vsuxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_f16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2_m( @@ -472,7 +472,7 @@ void test_vsuxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg2ei8_v_f16m4_m( @@ -481,7 +481,7 @@ void test_vsuxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2_m( @@ -490,7 +490,7 @@ void test_vsuxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1_m( @@ -499,7 +499,7 @@ void test_vsuxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2_m( @@ -508,7 +508,7 @@ void test_vsuxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4_m( @@ -517,7 +517,7 @@ void test_vsuxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1_m( @@ -526,7 +526,7 @@ void test_vsuxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2_m( @@ -535,7 +535,7 @@ void test_vsuxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4_m( @@ -544,7 +544,7 @@ void test_vsuxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8_m( @@ -553,7 +553,7 @@ void test_vsuxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl); + return 
__riscv_vsuxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4_m( @@ -562,7 +562,7 @@ void test_vsuxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2_m( @@ -571,7 +571,7 @@ void test_vsuxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1_m( @@ -580,7 +580,7 @@ void test_vsuxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2_m( @@ -589,7 +589,7 @@ void test_vsuxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4_m( @@ -598,7 +598,7 @@ void test_vsuxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, v // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4_m( @@ -607,7 +607,7 @@ void test_vsuxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf2_m( @@ -616,7 +616,7 @@ void test_vsuxseg2ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1_m( @@ -625,7 +625,7 @@ void test_vsuxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2_m( @@ -634,7 +634,7 @@ void test_vsuxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i16m2_m(mask, base, bindex, v0, 
v1, vl); + return __riscv_vsuxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4_m( @@ -643,7 +643,7 @@ void test_vsuxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2_m( @@ -652,7 +652,7 @@ void test_vsuxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1_m( @@ -661,7 +661,7 @@ void test_vsuxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2_m( @@ -670,7 +670,7 @@ void test_vsuxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4_m( @@ -679,7 +679,7 @@ void test_vsuxseg2ei8_v_i32m2_m(vbool16_t mask, 
int32_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1_m( @@ -688,7 +688,7 @@ void test_vsuxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsuxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2_m( @@ -697,7 +697,7 @@ void test_vsuxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4_m( @@ -706,7 +706,7 @@ void test_vsuxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8_m( @@ -715,7 +715,7 @@ void test_vsuxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return 
vsuxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4_m( @@ -724,7 +724,7 @@ void test_vsuxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2_m( @@ -733,7 +733,7 @@ void test_vsuxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1_m( @@ -742,7 +742,7 @@ void test_vsuxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2_m( @@ -751,7 +751,7 @@ void test_vsuxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4_m( @@ -760,7 +760,7 @@ void 
test_vsuxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4_m( @@ -769,7 +769,7 @@ void test_vsuxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2_m( @@ -778,7 +778,7 @@ void test_vsuxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1_m( @@ -787,7 +787,7 @@ void test_vsuxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2_m( @@ -796,7 +796,7 @@ void test_vsuxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, 
vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4_m( @@ -805,7 +805,7 @@ void test_vsuxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2_m( @@ -814,7 +814,7 @@ void test_vsuxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m1_m( @@ -823,7 +823,7 @@ void test_vsuxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2_m( @@ -832,7 +832,7 @@ void test_vsuxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl); } // 
CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4_m( @@ -841,7 +841,7 @@ void test_vsuxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1_m( @@ -850,7 +850,7 @@ void test_vsuxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2_m( @@ -859,7 +859,7 @@ void test_vsuxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl); } // CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4_m( @@ -868,6 +868,6 @@ void test_vsuxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg2ei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl); + return __riscv_vsuxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c 
index 2fb585926e44..958defab56e2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsuxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2( @@ -49,7 +49,7 @@ void 
test_vsuxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_f32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1( @@ -58,7 +58,7 @@ void test_vsuxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei16_v_f32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2( @@ -67,7 +67,7 @@ void test_vsuxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei16_v_f32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1( @@ -76,7 +76,7 @@ void test_vsuxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei16_v_f64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2( @@ -85,7 +85,7 @@ void test_vsuxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t 
vl) { - return vsuxseg3ei16_v_f64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsuxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei16_v_i8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsuxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_i8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsuxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_i8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1( @@ -121,7 +121,7 @@ void test_vsuxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei16_v_i8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2( @@ -130,7 +130,7 @@ void test_vsuxseg3ei16_v_i8m1(int8_t *base, 
vuint16m2_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei16_v_i8m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i8m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf4( @@ -139,7 +139,7 @@ void test_vsuxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_i16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2( @@ -148,7 +148,7 @@ void test_vsuxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_i16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1( @@ -157,7 +157,7 @@ void test_vsuxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei16_v_i16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2( @@ -166,7 +166,7 @@ void test_vsuxseg3ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei16_v_i16m2(base, bindex, v0, 
v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2( @@ -175,7 +175,7 @@ void test_vsuxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_i32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1( @@ -184,7 +184,7 @@ void test_vsuxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei16_v_i32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2( @@ -193,7 +193,7 @@ void test_vsuxseg3ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei16_v_i32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1( @@ -202,7 +202,7 @@ void test_vsuxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsuxseg3ei16_v_i64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2( @@ -211,7 +211,7 @@ void test_vsuxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei16_v_i64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8( @@ -220,7 +220,7 @@ void test_vsuxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei16_v_u8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4( @@ -229,7 +229,7 @@ void test_vsuxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_u8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf2( @@ -238,7 +238,7 @@ void test_vsuxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_u8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m1( @@ -247,7 +247,7 @@ void test_vsuxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u8m1(base, bindex, v0, v1, v2, vl); + return 
__riscv_vsuxseg3ei16_v_u8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2( @@ -256,7 +256,7 @@ void test_vsuxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u8m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u8m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf4( @@ -265,7 +265,7 @@ void test_vsuxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_u16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2( @@ -274,7 +274,7 @@ void test_vsuxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_u16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1( @@ -283,7 +283,7 @@ void test_vsuxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2( @@ -292,7 +292,7 @@ void test_vsuxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2( @@ -301,7 +301,7 @@ void test_vsuxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_u32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1( @@ -310,7 +310,7 @@ void test_vsuxseg3ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2( @@ -319,7 +319,7 @@ void test_vsuxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1( @@ -328,7 +328,7 @@ void test_vsuxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u64m1(base, bindex, v0, v1, v2, vl); + 
return __riscv_vsuxseg3ei16_v_u64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2( @@ -337,7 +337,7 @@ void test_vsuxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4_m( @@ -346,7 +346,7 @@ void test_vsuxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2_m( @@ -355,7 +355,7 @@ void test_vsuxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1_m( @@ -364,7 +364,7 @@ void test_vsuxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsuxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg3ei16_v_f16m2_m( @@ -373,7 +373,7 @@ void test_vsuxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2_m( @@ -382,7 +382,7 @@ void test_vsuxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1_m( @@ -391,7 +391,7 @@ void test_vsuxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2_m( @@ -400,7 +400,7 @@ void test_vsuxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1_m( @@ -409,7 +409,7 @@ void 
test_vsuxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2_m( @@ -418,7 +418,7 @@ void test_vsuxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8_m( @@ -427,7 +427,7 @@ void test_vsuxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4_m( @@ -436,7 +436,7 @@ void test_vsuxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2_m( @@ -445,7 +445,7 @@ void test_vsuxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1_m( @@ -454,7 +454,7 @@ void test_vsuxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2_m( @@ -463,7 +463,7 @@ void test_vsuxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf4_m( @@ -472,7 +472,7 @@ void test_vsuxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2_m( @@ -481,7 +481,7 @@ void test_vsuxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t 
bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1_m( @@ -490,7 +490,7 @@ void test_vsuxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2_m( @@ -499,7 +499,7 @@ void test_vsuxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2_m( @@ -508,7 +508,7 @@ void test_vsuxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1_m( @@ -517,7 +517,7 @@ void test_vsuxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return 
vsuxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2_m( @@ -526,7 +526,7 @@ void test_vsuxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1_m( @@ -535,7 +535,7 @@ void test_vsuxseg3ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsuxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2_m( @@ -544,7 +544,7 @@ void test_vsuxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8_m( @@ -553,7 +553,7 @@ void test_vsuxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return 
__riscv_vsuxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4_m( @@ -562,7 +562,7 @@ void test_vsuxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf2_m( @@ -571,7 +571,7 @@ void test_vsuxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m1_m( @@ -580,7 +580,7 @@ void test_vsuxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2_m( @@ -589,7 +589,7 @@ void test_vsuxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg3ei16_v_u16mf4_m( @@ -598,7 +598,7 @@ void test_vsuxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2_m( @@ -607,7 +607,7 @@ void test_vsuxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1_m( @@ -616,7 +616,7 @@ void test_vsuxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2_m( @@ -625,7 +625,7 @@ void test_vsuxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2_m( @@ -634,7 +634,7 @@ void 
test_vsuxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1_m( @@ -643,7 +643,7 @@ void test_vsuxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2_m( @@ -652,7 +652,7 @@ void test_vsuxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1_m( @@ -661,7 +661,7 @@ void test_vsuxseg3ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2_m( @@ -670,6 +670,6 @@ void test_vsuxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bi // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c index 620ffb801b04..0b7f5934c479 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsuxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl); } // 
CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsuxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_f32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1( @@ -58,7 +58,7 @@ void test_vsuxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei32_v_f32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2( @@ -67,7 +67,7 @@ void test_vsuxseg3ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1( @@ -76,7 +76,7 @@ void test_vsuxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m1(double *base, 
vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei32_v_f64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2( @@ -85,7 +85,7 @@ void test_vsuxseg3ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsuxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei32_v_i8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsuxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_i8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsuxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_i8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg3ei32_v_i8m1( @@ -121,7 +121,7 @@ void test_vsuxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m2( @@ -130,7 +130,7 @@ void test_vsuxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i8m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i8m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf4( @@ -139,7 +139,7 @@ void test_vsuxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_i16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf2( @@ -148,7 +148,7 @@ void test_vsuxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_i16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1( @@ -157,7 +157,7 @@ void test_vsuxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, 
vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2( @@ -166,7 +166,7 @@ void test_vsuxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2( @@ -175,7 +175,7 @@ void test_vsuxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_i32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1( @@ -184,7 +184,7 @@ void test_vsuxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2( @@ -193,7 +193,7 @@ void test_vsuxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1( @@ -202,7 +202,7 
@@ void test_vsuxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2( @@ -211,7 +211,7 @@ void test_vsuxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8( @@ -220,7 +220,7 @@ void test_vsuxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei32_v_u8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4( @@ -229,7 +229,7 @@ void test_vsuxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_u8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2( @@ -238,7 +238,7 @@ void test_vsuxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t 
vl) { - return vsuxseg3ei32_v_u8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1( @@ -247,7 +247,7 @@ void test_vsuxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m2( @@ -256,7 +256,7 @@ void test_vsuxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei32_v_u8m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u8m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4( @@ -265,7 +265,7 @@ void test_vsuxseg3ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_u16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2( @@ -274,7 +274,7 @@ void test_vsuxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_u16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1( @@ -283,7 +283,7 @@ void 
test_vsuxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2( @@ -292,7 +292,7 @@ void test_vsuxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei32_v_u16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2( @@ -301,7 +301,7 @@ void test_vsuxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_u32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1( @@ -310,7 +310,7 @@ void test_vsuxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2( @@ -319,7 +319,7 @@ void test_vsuxseg3ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, 
size_t vl) { - return vsuxseg3ei32_v_u32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1( @@ -328,7 +328,7 @@ void test_vsuxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2( @@ -337,7 +337,7 @@ void test_vsuxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei32_v_u64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4_m( @@ -346,7 +346,7 @@ void test_vsuxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2_m( @@ -355,7 +355,7 @@ void test_vsuxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, 
vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1_m( @@ -364,7 +364,7 @@ void test_vsuxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsuxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2_m( @@ -373,7 +373,7 @@ void test_vsuxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2_m( @@ -382,7 +382,7 @@ void test_vsuxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1_m( @@ -391,7 +391,7 @@ void test_vsuxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2_m( @@ -400,7 +400,7 
@@ void test_vsuxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1_m( @@ -409,7 +409,7 @@ void test_vsuxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2_m( @@ -418,7 +418,7 @@ void test_vsuxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8_m( @@ -427,7 +427,7 @@ void test_vsuxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4_m( @@ -436,7 +436,7 @@ void test_vsuxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2_m( @@ -445,7 +445,7 @@ void test_vsuxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m1_m( @@ -454,7 +454,7 @@ void test_vsuxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m2_m( @@ -463,7 +463,7 @@ void test_vsuxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf4_m( @@ -472,7 +472,7 @@ void test_vsuxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, 
vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf2_m( @@ -481,7 +481,7 @@ void test_vsuxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1_m( @@ -490,7 +490,7 @@ void test_vsuxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2_m( @@ -499,7 +499,7 @@ void test_vsuxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2_m( @@ -508,7 +508,7 @@ void test_vsuxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return 
vsuxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1_m( @@ -517,7 +517,7 @@ void test_vsuxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2_m( @@ -526,7 +526,7 @@ void test_vsuxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1_m( @@ -535,7 +535,7 @@ void test_vsuxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2_m( @@ -544,7 +544,7 @@ void test_vsuxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); + return 
__riscv_vsuxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8_m( @@ -553,7 +553,7 @@ void test_vsuxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4_m( @@ -562,7 +562,7 @@ void test_vsuxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2_m( @@ -571,7 +571,7 @@ void test_vsuxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1_m( @@ -580,7 +580,7 @@ void test_vsuxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg3ei32_v_u8m2_m( @@ -589,7 +589,7 @@ void test_vsuxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4_m( @@ -598,7 +598,7 @@ void test_vsuxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2_m( @@ -607,7 +607,7 @@ void test_vsuxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1_m( @@ -616,7 +616,7 @@ void test_vsuxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2_m( @@ -625,7 +625,7 @@ void 
test_vsuxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2_m( @@ -634,7 +634,7 @@ void test_vsuxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1_m( @@ -643,7 +643,7 @@ void test_vsuxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2_m( @@ -652,7 +652,7 @@ void test_vsuxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1_m( @@ -661,7 +661,7 @@ void test_vsuxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bin // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2_m( @@ -670,6 +670,6 @@ void test_vsuxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c index 89834a953a44..1ce4435a29a0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f16mf2(base, bindex, 
v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsuxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_f32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1( @@ -58,7 +58,7 @@ void test_vsuxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2( @@ -67,7 +67,7 @@ void test_vsuxseg3ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1( @@ -76,7 +76,7 @@ void test_vsuxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2( @@ -85,7 +85,7 @@ void test_vsuxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsuxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsuxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i8mf4(base, bindex, v0, v1, 
v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsuxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1( @@ -121,7 +121,7 @@ void test_vsuxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf4( @@ -130,7 +130,7 @@ void test_vsuxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_i16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2( @@ -139,7 +139,7 @@ void test_vsuxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_i16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1( @@ -148,7 +148,7 @@ void test_vsuxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m1(int16_t *base, 
vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2( @@ -157,7 +157,7 @@ void test_vsuxseg3ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_i16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2( @@ -166,7 +166,7 @@ void test_vsuxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_i32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1( @@ -175,7 +175,7 @@ void test_vsuxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2( @@ -184,7 +184,7 @@ void test_vsuxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei64_v_i32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg3ei64_v_i64m1( @@ -193,7 +193,7 @@ void test_vsuxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2( @@ -202,7 +202,7 @@ void test_vsuxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei64_v_i64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8( @@ -211,7 +211,7 @@ void test_vsuxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4( @@ -220,7 +220,7 @@ void test_vsuxseg3ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2( @@ -229,7 +229,7 @@ void test_vsuxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t 
v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1( @@ -238,7 +238,7 @@ void test_vsuxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4( @@ -247,7 +247,7 @@ void test_vsuxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_u16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2( @@ -256,7 +256,7 @@ void test_vsuxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_u16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m1( @@ -265,7 +265,7 @@ void test_vsuxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg3ei64_v_u16m2( @@ -274,7 +274,7 @@ void test_vsuxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_u16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2( @@ -283,7 +283,7 @@ void test_vsuxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_u32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1( @@ -292,7 +292,7 @@ void test_vsuxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2( @@ -301,7 +301,7 @@ void test_vsuxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei64_v_u32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1( @@ -310,7 +310,7 @@ void test_vsuxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t 
bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2( @@ -319,7 +319,7 @@ void test_vsuxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei64_v_u64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4_m( @@ -328,7 +328,7 @@ void test_vsuxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2_m( @@ -337,7 +337,7 @@ void test_vsuxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1_m( @@ -346,7 +346,7 @@ void test_vsuxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f16m1_m(mask, base, 
bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2_m( @@ -355,7 +355,7 @@ void test_vsuxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2_m( @@ -382,7 +382,7 @@ void test_vsuxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f32m2_m(mask, base, 
bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1_m( @@ -391,7 +391,7 @@ void test_vsuxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2_m( @@ -400,7 +400,7 @@ void test_vsuxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8_m( @@ -409,7 +409,7 @@ void test_vsuxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4_m( @@ -418,7 +418,7 @@ void test_vsuxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2_m( @@ -427,7 +427,7 
@@ void test_vsuxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1_m( @@ -436,7 +436,7 @@ void test_vsuxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf4_m( @@ -445,7 +445,7 @@ void test_vsuxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2_m( @@ -472,7 +472,7 @@ void test_vsuxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2_m( @@ -481,7 +481,7 @@ void test_vsuxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1_m( @@ -490,7 +490,7 @@ void test_vsuxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2_m( @@ -499,7 +499,7 @@ void test_vsuxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, 
vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m1_m( @@ -508,7 +508,7 @@ void test_vsuxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2_m( @@ -517,7 +517,7 @@ void test_vsuxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8_m( @@ -526,7 +526,7 @@ void test_vsuxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4_m( @@ -535,7 +535,7 @@ void test_vsuxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return 
vsuxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2_m( @@ -544,7 +544,7 @@ void test_vsuxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1_m( @@ -553,7 +553,7 @@ void test_vsuxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4_m( @@ -562,7 +562,7 @@ void test_vsuxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2_m( @@ -571,7 +571,7 @@ void test_vsuxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return 
__riscv_vsuxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m1_m( @@ -580,7 +580,7 @@ void test_vsuxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m2_m( @@ -589,7 +589,7 @@ void test_vsuxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2_m( @@ -598,7 +598,7 @@ void test_vsuxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1_m( @@ -607,7 +607,7 @@ void test_vsuxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); } // 
CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2_m( @@ -616,7 +616,7 @@ void test_vsuxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1_m( @@ -625,7 +625,7 @@ void test_vsuxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2_m( @@ -634,6 +634,6 @@ void test_vsuxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c index ebf93eeca43b..a421adafb916 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, 
vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg3ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsuxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsuxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_f32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg3ei8_v_f32m1( @@ -58,7 +58,7 @@ void test_vsuxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei8_v_f32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2( @@ -67,7 +67,7 @@ void test_vsuxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei8_v_f32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1( @@ -76,7 +76,7 @@ void test_vsuxseg3ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei8_v_f64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2( @@ -85,7 +85,7 @@ void test_vsuxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei8_v_f64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsuxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, 
vint8mf8_t v2, size_t vl) { - return vsuxseg3ei8_v_i8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsuxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_i8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsuxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_i8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1( @@ -121,7 +121,7 @@ void test_vsuxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei8_v_i8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2( @@ -130,7 +130,7 @@ void test_vsuxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i8m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i8m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4( @@ -139,7 +139,7 @@ void test_vsuxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t 
bindex, vint8m2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_i16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2( @@ -148,7 +148,7 @@ void test_vsuxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_i16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1( @@ -157,7 +157,7 @@ void test_vsuxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei8_v_i16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2( @@ -166,7 +166,7 @@ void test_vsuxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2( @@ -175,7 +175,7 @@ void test_vsuxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_i32mf2(base, bindex, v0, v1, v2, vl); + 
return __riscv_vsuxseg3ei8_v_i32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1( @@ -184,7 +184,7 @@ void test_vsuxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei8_v_i32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2( @@ -193,7 +193,7 @@ void test_vsuxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m1( @@ -202,7 +202,7 @@ void test_vsuxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsuxseg3ei8_v_i64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2( @@ -211,7 +211,7 @@ void test_vsuxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8( @@ -220,7 +220,7 @@ void test_vsuxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei8_v_u8mf8(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u8mf8(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4( @@ -229,7 +229,7 @@ void test_vsuxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_u8mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u8mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2( @@ -238,7 +238,7 @@ void test_vsuxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_u8mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u8mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m1( @@ -247,7 +247,7 @@ void test_vsuxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei8_v_u8m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u8m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2( @@ -256,7 +256,7 @@ void test_vsuxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u8m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u8m2(base, bindex, v0, v1, v2, vl); } // 
CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4( @@ -265,7 +265,7 @@ void test_vsuxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_u16mf4(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u16mf4(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2( @@ -274,7 +274,7 @@ void test_vsuxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_u16mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u16mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m1( @@ -283,7 +283,7 @@ void test_vsuxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei8_v_u16m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u16m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2( @@ -292,7 +292,7 @@ void test_vsuxseg3ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u16m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u16m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2( @@ -301,7 +301,7 @@ void test_vsuxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32mf2(uint32_t *base, 
vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_u32mf2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u32mf2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1( @@ -310,7 +310,7 @@ void test_vsuxseg3ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei8_v_u32m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u32m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2( @@ -319,7 +319,7 @@ void test_vsuxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u32m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u32m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1( @@ -328,7 +328,7 @@ void test_vsuxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei8_v_u64m1(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u64m1(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2( @@ -337,7 +337,7 @@ void test_vsuxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u64m2(base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u64m2(base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg3ei8_v_f16mf4_m( @@ -346,7 +346,7 @@ void test_vsuxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2_m( @@ -355,7 +355,7 @@ void test_vsuxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1_m( @@ -364,7 +364,7 @@ void test_vsuxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsuxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2_m( @@ -373,7 +373,7 @@ void test_vsuxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2_m( @@ -382,7 +382,7 @@ void 
test_vsuxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1_m( @@ -391,7 +391,7 @@ void test_vsuxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2_m( @@ -400,7 +400,7 @@ void test_vsuxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1_m( @@ -409,7 +409,7 @@ void test_vsuxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2_m( @@ -418,7 +418,7 @@ void test_vsuxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: 
ret void // void test_vsuxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8_m( @@ -427,7 +427,7 @@ void test_vsuxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4_m( @@ -436,7 +436,7 @@ void test_vsuxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2_m( @@ -445,7 +445,7 @@ void test_vsuxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1_m( @@ -454,7 +454,7 @@ void test_vsuxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, 
vint8m1_t v2, size_t vl) { - return vsuxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2_m( @@ -463,7 +463,7 @@ void test_vsuxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4_m( @@ -472,7 +472,7 @@ void test_vsuxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2_m( @@ -481,7 +481,7 @@ void test_vsuxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1_m( @@ -490,7 +490,7 @@ void test_vsuxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); + return 
__riscv_vsuxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2_m( @@ -499,7 +499,7 @@ void test_vsuxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2_m( @@ -508,7 +508,7 @@ void test_vsuxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1_m( @@ -517,7 +517,7 @@ void test_vsuxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2_m( @@ -526,7 +526,7 @@ void test_vsuxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg3ei8_v_i64m1_m( @@ -535,7 +535,7 @@ void test_vsuxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsuxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2_m( @@ -544,7 +544,7 @@ void test_vsuxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8_m( @@ -553,7 +553,7 @@ void test_vsuxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4_m( @@ -562,7 +562,7 @@ void test_vsuxseg3ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2_m( @@ -571,7 +571,7 @@ void test_vsuxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, 
vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m1_m( @@ -580,7 +580,7 @@ void test_vsuxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2_m( @@ -589,7 +589,7 @@ void test_vsuxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4_m( @@ -598,7 +598,7 @@ void test_vsuxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2_m( @@ -607,7 +607,7 @@ void test_vsuxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, 
vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m1_m( @@ -616,7 +616,7 @@ void test_vsuxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2_m( @@ -625,7 +625,7 @@ void test_vsuxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2_m( @@ -634,7 +634,7 @@ void test_vsuxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1_m( @@ -643,7 +643,7 @@ void test_vsuxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return 
vsuxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2_m( @@ -652,7 +652,7 @@ void test_vsuxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1_m( @@ -661,7 +661,7 @@ void test_vsuxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } // CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2_m( @@ -670,6 +670,6 @@ void test_vsuxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg3ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); + return __riscv_vsuxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c index 85dd45ff704a..02c16469e90a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsuxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, 
vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1( @@ -58,7 +58,7 @@ void test_vsuxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei16_v_f32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2( @@ -67,7 +67,7 @@ void test_vsuxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei16_v_f32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1( @@ -76,7 +76,7 @@ void test_vsuxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2( @@ -85,7 +85,7 @@ void test_vsuxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, 
vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsuxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsuxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsuxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1( @@ -121,7 +121,7 @@ void test_vsuxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei16_v_i8m1(base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsuxseg4ei16_v_i8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2( @@ -130,7 +130,7 @@ void test_vsuxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei16_v_i8m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i8m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4( @@ -139,7 +139,7 @@ void test_vsuxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2( @@ -148,7 +148,7 @@ void test_vsuxseg4ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m1( @@ -157,7 +157,7 @@ void test_vsuxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei16_v_i16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2( @@ -166,7 +166,7 @@ void 
test_vsuxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei16_v_i16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2( @@ -175,7 +175,7 @@ void test_vsuxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1( @@ -184,7 +184,7 @@ void test_vsuxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei16_v_i32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2( @@ -193,7 +193,7 @@ void test_vsuxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei16_v_i32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1( @@ -202,7 +202,7 @@ void test_vsuxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m1(int64_t 
*base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsuxseg4ei16_v_i64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2( @@ -211,7 +211,7 @@ void test_vsuxseg4ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei16_v_i64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8( @@ -220,7 +220,7 @@ void test_vsuxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4( @@ -229,7 +229,7 @@ void test_vsuxseg4ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2( @@ -238,7 +238,7 @@ void test_vsuxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_u8mf2(base, 
bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1( @@ -247,7 +247,7 @@ void test_vsuxseg4ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2( @@ -256,7 +256,7 @@ void test_vsuxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u8m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u8m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4( @@ -265,7 +265,7 @@ void test_vsuxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2( @@ -274,7 +274,7 @@ void test_vsuxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg4ei16_v_u16m1( @@ -283,7 +283,7 @@ void test_vsuxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2( @@ -292,7 +292,7 @@ void test_vsuxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2( @@ -301,7 +301,7 @@ void test_vsuxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1( @@ -310,7 +310,7 @@ void test_vsuxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2( @@ -319,7 +319,7 @@ void test_vsuxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, 
vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1( @@ -328,7 +328,7 @@ void test_vsuxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2( @@ -337,7 +337,7 @@ void test_vsuxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4_m( @@ -346,7 +346,7 @@ void test_vsuxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2_m( @@ -355,7 +355,7 @@ void test_vsuxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1_m( @@ -364,7 +364,7 @@ void test_vsuxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2_m( @@ -373,7 +373,7 @@ void test_vsuxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2_m( @@ -382,7 +382,7 @@ void test_vsuxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1_m( @@ -391,7 +391,7 @@ void test_vsuxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind 
// CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2_m( @@ -400,7 +400,7 @@ void test_vsuxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1_m( @@ -409,7 +409,7 @@ void test_vsuxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2_m( @@ -418,7 +418,7 @@ void test_vsuxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8_m( @@ -427,7 +427,7 @@ void test_vsuxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, 
vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4_m( @@ -436,7 +436,7 @@ void test_vsuxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2_m( @@ -445,7 +445,7 @@ void test_vsuxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1_m( @@ -454,7 +454,7 @@ void test_vsuxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2_m( @@ -463,7 +463,7 @@ void test_vsuxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4_m( @@ -472,7 +472,7 @@ void test_vsuxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2_m( @@ -481,7 +481,7 @@ void test_vsuxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m1_m( @@ -490,7 +490,7 @@ void test_vsuxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2_m( @@ -499,7 +499,7 @@ void test_vsuxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2_m( @@ -508,7 +508,7 @@ void test_vsuxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1_m( @@ -517,7 +517,7 @@ void test_vsuxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2_m( @@ -526,7 +526,7 @@ void test_vsuxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1_m( @@ -535,7 +535,7 @@ void test_vsuxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bind // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsuxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2_m( @@ -544,7 +544,7 @@ void test_vsuxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8_m( @@ -553,7 +553,7 @@ void test_vsuxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4_m( @@ -562,7 +562,7 @@ void test_vsuxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2_m( @@ -571,7 +571,7 @@ void test_vsuxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1_m( @@ -580,7 +580,7 @@ void test_vsuxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2_m( @@ -589,7 +589,7 @@ void test_vsuxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4_m( @@ -598,7 +598,7 @@ void test_vsuxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2_m( @@ -607,7 +607,7 @@ void test_vsuxseg4ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m1_m( @@ -616,7 +616,7 @@ void test_vsuxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2_m( @@ -625,7 +625,7 @@ void test_vsuxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2_m( @@ -634,7 +634,7 @@ void test_vsuxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1_m( @@ -643,7 +643,7 @@ void test_vsuxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t 
*base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2_m( @@ -652,7 +652,7 @@ void test_vsuxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1_m( @@ -661,7 +661,7 @@ void test_vsuxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2_m( @@ -670,6 +670,6 @@ void test_vsuxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c index 53afb84b7e03..94a615539c7a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f16m2(base, bindex, 
v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsuxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1( @@ -58,7 +58,7 @@ void test_vsuxseg4ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei32_v_f32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2( @@ -67,7 +67,7 @@ void test_vsuxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1( @@ -76,7 +76,7 @@ void test_vsuxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei32_v_f64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg4ei32_v_f64m2( @@ -85,7 +85,7 @@ void test_vsuxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsuxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsuxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsuxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1( @@ -121,7 +121,7 @@ void test_vsuxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void 
// void test_vsuxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2( @@ -130,7 +130,7 @@ void test_vsuxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i8m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i8m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf4( @@ -139,7 +139,7 @@ void test_vsuxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2( @@ -148,7 +148,7 @@ void test_vsuxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1( @@ -157,7 +157,7 @@ void test_vsuxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return 
vsuxseg4ei32_v_i16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2( @@ -166,7 +166,7 @@ void test_vsuxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2( @@ -175,7 +175,7 @@ void test_vsuxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1( @@ -184,7 +184,7 @@ void test_vsuxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2( @@ -193,7 +193,7 @@ void test_vsuxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i32m2(base, bindex, v0, v1, v2, v3, vl); } // 
CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1( @@ -202,7 +202,7 @@ void test_vsuxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2( @@ -211,7 +211,7 @@ void test_vsuxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf8( @@ -220,7 +220,7 @@ void test_vsuxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4( @@ -229,7 +229,7 @@ void test_vsuxseg4ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2( @@ -238,7 +238,7 @@ void test_vsuxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t 
v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1( @@ -247,7 +247,7 @@ void test_vsuxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2( @@ -256,7 +256,7 @@ void test_vsuxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u8m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u8m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4( @@ -265,7 +265,7 @@ void test_vsuxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2( @@ -274,7 +274,7 @@ void test_vsuxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, 
vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1( @@ -283,7 +283,7 @@ void test_vsuxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2( @@ -292,7 +292,7 @@ void test_vsuxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2( @@ -301,7 +301,7 @@ void test_vsuxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1( @@ -310,7 +310,7 @@ void test_vsuxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u32m1(base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsuxseg4ei32_v_u32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2( @@ -319,7 +319,7 @@ void test_vsuxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1( @@ -328,7 +328,7 @@ void test_vsuxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2( @@ -337,7 +337,7 @@ void test_vsuxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4_m( @@ -346,7 +346,7 @@ void test_vsuxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg4ei32_v_f16mf2_m( @@ -355,7 +355,7 @@ void test_vsuxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1_m( @@ -364,7 +364,7 @@ void test_vsuxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2_m( @@ -373,7 +373,7 @@ void test_vsuxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2_m( @@ -382,7 +382,7 @@ void test_vsuxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, 
v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1_m( @@ -391,7 +391,7 @@ void test_vsuxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2_m( @@ -400,7 +400,7 @@ void test_vsuxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1_m( @@ -409,7 +409,7 @@ void test_vsuxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2_m( @@ -418,7 +418,7 @@ void test_vsuxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_f64m2_m(mask, 
base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8_m( @@ -427,7 +427,7 @@ void test_vsuxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4_m( @@ -436,7 +436,7 @@ void test_vsuxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2_m( @@ -445,7 +445,7 @@ void test_vsuxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1_m( @@ -454,7 +454,7 @@ void test_vsuxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, 
v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2_m( @@ -463,7 +463,7 @@ void test_vsuxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf4_m( @@ -472,7 +472,7 @@ void test_vsuxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2_m( @@ -481,7 +481,7 @@ void test_vsuxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1_m( @@ -490,7 +490,7 @@ void test_vsuxseg4ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, 
v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2_m( @@ -499,7 +499,7 @@ void test_vsuxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2_m( @@ -508,7 +508,7 @@ void test_vsuxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1_m( @@ -517,7 +517,7 @@ void test_vsuxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2_m( @@ -526,7 +526,7 @@ void test_vsuxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, 
vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1_m( @@ -535,7 +535,7 @@ void test_vsuxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2_m( @@ -544,7 +544,7 @@ void test_vsuxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf8_m( @@ -553,7 +553,7 @@ void test_vsuxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4_m( @@ -562,7 +562,7 @@ void test_vsuxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, 
vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2_m( @@ -571,7 +571,7 @@ void test_vsuxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1_m( @@ -580,7 +580,7 @@ void test_vsuxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2_m( @@ -589,7 +589,7 @@ void test_vsuxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4_m( @@ -598,7 +598,7 @@ void test_vsuxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, 
vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2_m( @@ -607,7 +607,7 @@ void test_vsuxseg4ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1_m( @@ -616,7 +616,7 @@ void test_vsuxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2_m( @@ -625,7 +625,7 @@ void test_vsuxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2_m( @@ -634,7 +634,7 @@ void test_vsuxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u32mf2_m(mask, base, 
bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1_m( @@ -643,7 +643,7 @@ void test_vsuxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2_m( @@ -652,7 +652,7 @@ void test_vsuxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1_m( @@ -661,7 +661,7 @@ void test_vsuxseg4ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2_m( @@ -670,6 +670,6 @@ void test_vsuxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsuxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c index 3ed750afec6e..75c1b6dcaeca 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsuxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1( @@ -58,7 +58,7 @@ void test_vsuxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2( @@ -67,7 +67,7 @@ void test_vsuxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1( @@ -76,7 +76,7 @@ void test_vsuxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, 
size_t vl) { - return vsuxseg4ei64_v_f64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2( @@ -85,7 +85,7 @@ void test_vsuxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsuxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsuxseg4ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsuxseg4ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); } // 
CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1( @@ -121,7 +121,7 @@ void test_vsuxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4( @@ -130,7 +130,7 @@ void test_vsuxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf2( @@ -139,7 +139,7 @@ void test_vsuxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1( @@ -148,7 +148,7 @@ void test_vsuxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2( @@ -157,7 +157,7 @@ void test_vsuxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_i16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2( @@ -166,7 +166,7 @@ void test_vsuxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1( @@ -175,7 +175,7 @@ void test_vsuxseg4ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2( @@ -184,7 +184,7 @@ void test_vsuxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei64_v_i32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m1( @@ -193,7 +193,7 @@ void test_vsuxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, 
vint64m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2( @@ -202,7 +202,7 @@ void test_vsuxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei64_v_i64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8( @@ -211,7 +211,7 @@ void test_vsuxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4( @@ -220,7 +220,7 @@ void test_vsuxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2( @@ -229,7 +229,7 @@ void test_vsuxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u8mf2(base, 
bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1( @@ -238,7 +238,7 @@ void test_vsuxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf4( @@ -247,7 +247,7 @@ void test_vsuxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2( @@ -256,7 +256,7 @@ void test_vsuxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m1( @@ -265,7 +265,7 @@ void test_vsuxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2( @@ -274,7 +274,7 @@ void 
test_vsuxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_u16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2( @@ -283,7 +283,7 @@ void test_vsuxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1( @@ -292,7 +292,7 @@ void test_vsuxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2( @@ -301,7 +301,7 @@ void test_vsuxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei64_v_u32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1( @@ -310,7 +310,7 @@ void test_vsuxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2( @@ -319,7 +319,7 @@ void test_vsuxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei64_v_u64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4_m( @@ -328,7 +328,7 @@ void test_vsuxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2_m( @@ -337,7 +337,7 @@ void test_vsuxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1_m( @@ -346,7 +346,7 @@ void test_vsuxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m1_m(vbool16_t 
mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2_m( @@ -355,7 +355,7 @@ void test_vsuxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2_m( @@ -382,7 +382,7 @@ void test_vsuxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1_m( @@ -391,7 +391,7 @@ void test_vsuxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2_m( @@ -400,7 +400,7 @@ void test_vsuxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8_m( @@ -409,7 +409,7 @@ void test_vsuxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4_m( @@ -418,7 +418,7 @@ void test_vsuxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void 
// void test_vsuxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2_m( @@ -427,7 +427,7 @@ void test_vsuxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1_m( @@ -436,7 +436,7 @@ void test_vsuxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4_m( @@ -445,7 +445,7 @@ void test_vsuxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2_m( @@ -472,7 +472,7 @@ void test_vsuxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2_m( @@ -481,7 +481,7 @@ void test_vsuxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1_m( @@ -490,7 +490,7 @@ void test_vsuxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // 
void test_vsuxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2_m( @@ -499,7 +499,7 @@ void test_vsuxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m1_m( @@ -508,7 +508,7 @@ void test_vsuxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2_m( @@ -517,7 +517,7 @@ void test_vsuxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8_m( @@ -526,7 +526,7 @@ void test_vsuxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4_m( @@ -535,7 +535,7 @@ void test_vsuxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2_m( @@ -544,7 +544,7 @@ void test_vsuxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1_m( @@ -553,7 +553,7 @@ void test_vsuxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf4_m( @@ -562,7 +562,7 @@ void test_vsuxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2_m( @@ -571,7 +571,7 @@ void test_vsuxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m1_m( @@ -580,7 +580,7 @@ void test_vsuxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2_m( @@ -589,7 +589,7 @@ void test_vsuxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2_m( @@ -598,7 +598,7 @@ void test_vsuxseg4ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bind // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1_m( @@ -607,7 +607,7 @@ void test_vsuxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2_m( @@ -616,7 +616,7 @@ void test_vsuxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1_m( @@ -625,7 +625,7 @@ void test_vsuxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2_m( @@ -634,6 +634,6 @@ void test_vsuxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, 
vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c index 82b4916c81f2..232d491b6e43 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg4ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei8_v_f16m1(base, bindex, v0, v1, 
v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2( @@ -40,7 +40,7 @@ void test_vsuxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2( @@ -49,7 +49,7 @@ void test_vsuxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1( @@ -58,7 +58,7 @@ void test_vsuxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei8_v_f32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2( @@ -67,7 +67,7 @@ void test_vsuxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei8_v_f32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1( @@ -76,7 
+76,7 @@ void test_vsuxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei8_v_f64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2( @@ -85,7 +85,7 @@ void test_vsuxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei8_v_f64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8( @@ -94,7 +94,7 @@ void test_vsuxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf4( @@ -103,7 +103,7 @@ void test_vsuxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2( @@ -112,7 +112,7 @@ void test_vsuxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf2(int8_t 
*base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1( @@ -121,7 +121,7 @@ void test_vsuxseg4ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei8_v_i8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2( @@ -130,7 +130,7 @@ void test_vsuxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i8m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i8m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4( @@ -139,7 +139,7 @@ void test_vsuxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2( @@ -148,7 +148,7 @@ void test_vsuxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsuxseg4ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1( @@ -157,7 +157,7 @@ void test_vsuxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei8_v_i16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2( @@ -166,7 +166,7 @@ void test_vsuxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32mf2( @@ -175,7 +175,7 @@ void test_vsuxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1( @@ -184,7 +184,7 @@ void test_vsuxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei8_v_i32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2( @@ -193,7 +193,7 @@ void 
test_vsuxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1( @@ -202,7 +202,7 @@ void test_vsuxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsuxseg4ei8_v_i64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2( @@ -211,7 +211,7 @@ void test_vsuxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8( @@ -220,7 +220,7 @@ void test_vsuxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4( @@ -229,7 +229,7 @@ void test_vsuxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t 
bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2( @@ -238,7 +238,7 @@ void test_vsuxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1( @@ -247,7 +247,7 @@ void test_vsuxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u8m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u8m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2( @@ -256,7 +256,7 @@ void test_vsuxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u8m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u8m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4( @@ -265,7 +265,7 @@ void test_vsuxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); + return 
__riscv_vsuxseg4ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2( @@ -274,7 +274,7 @@ void test_vsuxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1( @@ -283,7 +283,7 @@ void test_vsuxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u16m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u16m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2( @@ -292,7 +292,7 @@ void test_vsuxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u16m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u16m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2( @@ -301,7 +301,7 @@ void test_vsuxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1( @@ -310,7 +310,7 @@ void 
test_vsuxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u32m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u32m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2( @@ -319,7 +319,7 @@ void test_vsuxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u32m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u32m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1( @@ -328,7 +328,7 @@ void test_vsuxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u64m1(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u64m1(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2( @@ -337,7 +337,7 @@ void test_vsuxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u64m2(base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u64m2(base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4_m( @@ -346,7 +346,7 @@ void test_vsuxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0 // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2_m( @@ -355,7 +355,7 @@ void test_vsuxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1_m( @@ -364,7 +364,7 @@ void test_vsuxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2_m( @@ -373,7 +373,7 @@ void test_vsuxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2_m( @@ -382,7 +382,7 @@ void test_vsuxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1_m( @@ -391,7 +391,7 @@ void test_vsuxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2_m( @@ -400,7 +400,7 @@ void test_vsuxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1_m( @@ -409,7 +409,7 @@ void test_vsuxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2_m( @@ -418,7 +418,7 @@ void test_vsuxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t 
bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8_m( @@ -427,7 +427,7 @@ void test_vsuxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf4_m( @@ -436,7 +436,7 @@ void test_vsuxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2_m( @@ -445,7 +445,7 @@ void test_vsuxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1_m( @@ -454,7 +454,7 @@ void test_vsuxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2_m( @@ -463,7 +463,7 @@ void test_vsuxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4_m( @@ -472,7 +472,7 @@ void test_vsuxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2_m( @@ -481,7 +481,7 @@ void test_vsuxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1_m( @@ -490,7 +490,7 @@ void test_vsuxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // 
void test_vsuxseg4ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2_m( @@ -499,7 +499,7 @@ void test_vsuxseg4ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32mf2_m( @@ -508,7 +508,7 @@ void test_vsuxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1_m( @@ -517,7 +517,7 @@ void test_vsuxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2_m( @@ -526,7 +526,7 @@ void test_vsuxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1_m( @@ -535,7 +535,7 @@ void test_vsuxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsuxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2_m( @@ -544,7 +544,7 @@ void test_vsuxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8_m( @@ -553,7 +553,7 @@ void test_vsuxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4_m( @@ -562,7 +562,7 @@ void test_vsuxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2_m( @@ -571,7 +571,7 @@ void test_vsuxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1_m( @@ -580,7 +580,7 @@ void test_vsuxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2_m( @@ -589,7 +589,7 @@ void test_vsuxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4_m( @@ -598,7 +598,7 @@ void test_vsuxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2_m( @@ -607,7 +607,7 @@ void test_vsuxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1_m( @@ -616,7 +616,7 @@ void test_vsuxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2_m( @@ -625,7 +625,7 @@ void test_vsuxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2_m( @@ -634,7 +634,7 @@ void test_vsuxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex // CHECK-RV64-NEXT: ret void 
// void test_vsuxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1_m( @@ -643,7 +643,7 @@ void test_vsuxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2_m( @@ -652,7 +652,7 @@ void test_vsuxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1_m( @@ -661,7 +661,7 @@ void test_vsuxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } // CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2_m( @@ -670,6 +670,6 @@ void test_vsuxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void 
// void test_vsuxseg4ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); + return __riscv_vsuxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c index ee82aaadf594..f19c2e282cf0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg5ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return 
vsuxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg5ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg5ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei16_v_i8mf8(base, bindex, 
v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i16mf4(base, 
bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg5ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // 
CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsuxseg5ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1( 
@@ -184,7 +184,7 @@ void test_vsuxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32mf2( @@ -220,7 +220,7 @@ void 
test_vsuxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2_m( 
@@ -256,7 +256,7 @@ void test_vsuxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); 
+ return __riscv_vsuxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, 
size_t vl) { - return vsuxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, 
vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsuxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4_m( @@ -427,7 
+427,7 @@ void test_vsuxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return 
__riscv_vsuxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c index bc9e288100dc..618f498a73d2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg5ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg5ei32_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg5ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1( @@ -94,7 +94,7 @@ void 
test_vsuxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t 
bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret 
void // void test_vsuxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsuxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2_m( @@ -274,7 +274,7 @@ void 
test_vsuxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg5ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i8mf8_m(mask, 
base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_i16mf4_m(mask, base, 
bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, 
vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1_m( @@ -445,7 +445,7 @@ void 
test_vsuxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei32_v_u64m1_m(mask, 
base, bindex, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c index 4ac1491ff812..b8f7d0b94b17 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, 
vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret 
void // void test_vsuxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t 
bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, 
vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, 
size_t vl) { - return vsuxseg5ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - 
return vsuxseg5ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsuxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, 
vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg5ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1_m( @@ -328,7 +328,7 @@ void 
test_vsuxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, 
v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf8_m(mask, base, bindex, 
v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, 
vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c index 4a9af3f6ce31..2d38799a10fa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, 
vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg5ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return 
vsuxseg5ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i8m1(base, 
bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // 
CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsuxseg5ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2( @@ -175,7 +175,7 @@ void 
test_vsuxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg5ei8_v_u16mf2(uint16_t *base, 
vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsuxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg5ei8_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return 
__riscv_vsuxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return 
vsuxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg5ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, 
vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsuxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg5ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg5ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2_m( @@ -454,7 +454,7 @@ void 
test_vsuxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } // CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg5ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg5ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); + return __riscv_vsuxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c index 76b2d473326c..6692a149ec79 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // 
void test_vsuxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg6ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1( @@ -49,7 +49,7 @@ void 
test_vsuxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg6ei16_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, 
v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg6ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsuxseg6ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return 
__riscv_vsuxseg6ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u8m1(base, 
bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t 
v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsuxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return 
__riscv_vsuxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, 
vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1_m( @@ -355,7 +355,7 @@ void 
test_vsuxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsuxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, 
v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t 
*base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg6ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg6ei16_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c index 1d1e15848cf0..98a390160add 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg6ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return 
__riscv_vsuxseg6ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_i8mf4(base, 
bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - 
return vsuxseg6ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, 
vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, 
vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg6ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2_m( @@ -256,7 
+256,7 @@ void test_vsuxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t 
vl) { - return vsuxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void 
// void test_vsuxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg6ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } 
// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, 
size_t vl) { - return vsuxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return 
__riscv_vsuxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c index 6bddd6bcb48b..268653a348de 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, 
size_t vl) { - return vsuxseg6ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg6ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg6ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, 
vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, 
vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, 
vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2( @@ -220,7 +220,7 @@ 
void test_vsuxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return 
__riscv_vsuxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsuxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f32m1_m(vbool32_t 
mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg6ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg6ei64_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return 
vsuxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg6ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); 
} // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, 
vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c index 28b524cacb67..f431f1162175 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32mf2(float *base, 
vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg6ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // 
void test_vsuxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret 
void // void test_vsuxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, 
vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsuxseg6ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg6ei8_v_u8mf2(uint8_t 
*base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32mf2( @@ -220,7 
+220,7 @@ void test_vsuxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_f16mf4_m(mask, 
base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsuxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, 
vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2_m( @@ -319,7 +319,7 @@ void 
test_vsuxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + 
return __riscv_vsuxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, 
vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsuxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1_m( @@ -418,7 +418,7 @@ void 
test_vsuxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u16m1_m(mask, base, bindex, 
v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } // CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg6ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); + return __riscv_vsuxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c index 
acd2a2bfbc62..46db813f143f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32mf2(float *base, vuint16mf4_t 
bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg7ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4( @@ -76,7 +76,7 @@ void 
test_vsuxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i16mf4(base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, 
vint32m1_t v6, size_t vl) { - return vsuxseg7ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg7ei16_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return 
vsuxseg7ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsuxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2_m( @@ -274,7 +274,7 @@ void 
test_vsuxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg7ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, 
vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4_m( @@ -337,7 +337,7 @@ void 
test_vsuxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, 
vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4_m( @@ -400,7 
+400,7 @@ void test_vsuxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t 
v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg7ei16_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c index 51770e933afa..3390afbf0a0e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } 
// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, 
vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg7ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg7ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg7ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg7ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1( @@ -121,7 
+121,7 @@ void test_vsuxseg7ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg7ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg7ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg7ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return 
__riscv_vsuxseg7ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg7ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg7ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg7ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg7ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, 
vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg7ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg7ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg7ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg7ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, 
vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg7ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg7ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg7ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return 
__riscv_vsuxseg7ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsuxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg7ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t 
bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg7ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg7ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg7ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_i8mf4_m(mask, 
base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i32m1_m(mask, base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t 
bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg7ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return 
vsuxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1_m( @@ -472,6 +472,6 @@ void 
test_vsuxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c index a7ab6e022bb1..3b82241e6ad8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1( @@ -31,7 +31,7 @@ void 
test_vsuxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg7ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f64m1(base, bindex, v0, v1, v2, v3, 
v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, 
vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // 
void test_vsuxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg7ei64_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_u16mf4(base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32m1(uint32_t *base, 
vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg7ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsuxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, 
v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return 
__riscv_vsuxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return 
__riscv_vsuxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, 
v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c index 3f02ed73b5fb..e37c9d513831 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg7ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // 
CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, 
vl); + return __riscv_vsuxseg7ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, 
vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg7ei8_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg7ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u64m1(base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsuxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg7ei8_v_f16m1_m(vbool16_t mask, 
_Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg7ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - 
return vsuxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v 
// CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_i32mf2_m(mask, base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg7ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: 
ret void // void test_vsuxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, 
v6, vl); + return __riscv_vsuxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } // CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg7ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); + return __riscv_vsuxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c index 708f031db053..c51941198df7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg8ei16_v_f16mf4(_Float16 *base, 
vuint16mf4_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t 
vl) { - return vsuxseg8ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, // 
CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return 
__riscv_vsuxseg8ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t 
bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsuxseg8ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg8ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } 
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg8ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, 
vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg8ei16_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsuxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret 
void // void test_vsuxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, 
vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } 
// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsuxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) 
{ - return vsuxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg8ei16_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bin // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c index c477428c5fe1..fa666dd76beb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, 
vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg8ei32_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, 
size_t vl) { - return vsuxseg8ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t 
bindex, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, 
v7, vl); + return __riscv_vsuxseg8ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg8ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg8ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg8ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + 
return __riscv_vsuxseg8ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg8ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg8ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsuxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1_m( @@ -265,7 +265,7 @@ void 
test_vsuxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg8ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg8ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_f64m1_m(vbool64_t mask, double *base, 
vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg8ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg8ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg8ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, 
v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg8ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg8ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg8ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1_m( @@ -355,7 +355,7 @@ void 
test_vsuxseg8ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg8ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg8ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg8ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t 
v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg8ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg8ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return 
__riscv_vsuxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1_m( @@ -445,7 +445,7 @@ void 
test_vsuxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg8ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t b // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t 
bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c index 1b88ff527989..97a1d39cc3c3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_ // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_ // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); 
+ return __riscv_vsuxseg8ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, 
vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1( @@ -121,7 +121,7 @@ void test_vsuxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: 
@test_vsuxseg8ei64_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsuxseg8ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, 
vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg8ei64_v_u8m1(uint8_t 
*base, vuint64m8_t bindex, vuint8m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg8ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) 
{ - return vsuxseg8ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2_m( @@ -256,7 +256,7 @@ void 
test_vsuxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f32m1_m(vbool32_t mask, 
float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg8ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf4_m(mask, 
base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2_m( @@ -346,7 +346,7 @@ void 
test_vsuxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, 
vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsuxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return 
__riscv_vsuxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg8ei64_v_u16mf4_m(vbool64_t 
mask, uint16_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bi // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t 
v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c index bfa07a1e9097..b39d54f86363 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2( @@ -22,7 +22,7 @@ void test_vsuxseg8ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t // CHECK-RV64-NEXT: ret void // void 
test_vsuxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1( @@ -31,7 +31,7 @@ void test_vsuxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2( @@ -40,7 +40,7 @@ void test_vsuxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1( @@ -49,7 +49,7 @@ void test_vsuxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); 
+ return __riscv_vsuxseg8ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1( @@ -58,7 +58,7 @@ void test_vsuxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8( @@ -67,7 +67,7 @@ void test_vsuxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4( @@ -76,7 +76,7 @@ void test_vsuxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2( @@ -85,7 +85,7 @@ void test_vsuxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t 
v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1( @@ -94,7 +94,7 @@ void test_vsuxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4( @@ -103,7 +103,7 @@ void test_vsuxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2( @@ -112,7 +112,7 @@ void test_vsuxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1( @@ 
-121,7 +121,7 @@ void test_vsuxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2( @@ -130,7 +130,7 @@ void test_vsuxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1( @@ -139,7 +139,7 @@ void test_vsuxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1( @@ -148,7 +148,7 @@ void test_vsuxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return 
vsuxseg8ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8( @@ -157,7 +157,7 @@ void test_vsuxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4( @@ -166,7 +166,7 @@ void test_vsuxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2( @@ -175,7 +175,7 @@ void test_vsuxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1( @@ -184,7 +184,7 @@ void test_vsuxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, // CHECK-RV64-NEXT: 
ret void // void test_vsuxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4( @@ -193,7 +193,7 @@ void test_vsuxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vu // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2( @@ -202,7 +202,7 @@ void test_vsuxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1( @@ -211,7 +211,7 @@ void test_vsuxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return 
__riscv_vsuxseg8ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2( @@ -220,7 +220,7 @@ void test_vsuxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1( @@ -229,7 +229,7 @@ void test_vsuxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1( @@ -238,7 +238,7 @@ void test_vsuxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4_m( @@ -247,7 +247,7 @@ void test_vsuxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0 // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf4_m(vbool64_t mask, 
_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2_m( @@ -256,7 +256,7 @@ void test_vsuxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1_m( @@ -265,7 +265,7 @@ void test_vsuxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2_m( @@ -274,7 +274,7 @@ void test_vsuxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, 
vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1_m( @@ -283,7 +283,7 @@ void test_vsuxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1_m( @@ -292,7 +292,7 @@ void test_vsuxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8_m( @@ -301,7 +301,7 @@ void test_vsuxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); 
} // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4_m( @@ -310,7 +310,7 @@ void test_vsuxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2_m( @@ -319,7 +319,7 @@ void test_vsuxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1_m( @@ -328,7 +328,7 @@ void test_vsuxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4_m( @@ -337,7 +337,7 @@ void test_vsuxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, v // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t 
bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2_m( @@ -346,7 +346,7 @@ void test_vsuxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1_m( @@ -355,7 +355,7 @@ void test_vsuxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2_m( @@ -364,7 +364,7 @@ void test_vsuxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, 
v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1_m( @@ -373,7 +373,7 @@ void test_vsuxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1_m( @@ -382,7 +382,7 @@ void test_vsuxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsuxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8_m( @@ -391,7 +391,7 @@ void test_vsuxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4_m( @@ -400,7 +400,7 @@ void test_vsuxseg8ei8_v_u8mf8_m(vbool64_t mask, 
uint8_t *base, vuint8mf8_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2_m( @@ -409,7 +409,7 @@ void test_vsuxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1_m( @@ -418,7 +418,7 @@ void test_vsuxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t binde // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4_m( @@ -427,7 +427,7 @@ void test_vsuxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, 
vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2_m( @@ -436,7 +436,7 @@ void test_vsuxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1_m( @@ -445,7 +445,7 @@ void test_vsuxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2_m( @@ -454,7 +454,7 @@ void test_vsuxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return 
__riscv_vsuxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1_m( @@ -463,7 +463,7 @@ void test_vsuxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bin // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } // CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1_m( @@ -472,6 +472,6 @@ void test_vsuxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bind // CHECK-RV64-NEXT: ret void // void test_vsuxseg8ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); + return __riscv_vsuxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vundefined.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vundefined.c index cfa5b93bfbff..d8171419f33e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vundefined.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vundefined.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret poison // vfloat16mf4_t test_vundefined_f16mf4() { - return vundefined_f16mf4(); + return __riscv_vundefined_f16mf4(); } // CHECK-RV64-LABEL: @test_vundefined_f16mf2( @@ -20,7 +20,7 @@ vfloat16mf4_t test_vundefined_f16mf4() { // 
CHECK-RV64-NEXT: ret poison // vfloat16mf2_t test_vundefined_f16mf2() { - return vundefined_f16mf2(); + return __riscv_vundefined_f16mf2(); } // CHECK-RV64-LABEL: @test_vundefined_f16m1( @@ -28,7 +28,7 @@ vfloat16mf2_t test_vundefined_f16mf2() { // CHECK-RV64-NEXT: ret poison // vfloat16m1_t test_vundefined_f16m1() { - return vundefined_f16m1(); + return __riscv_vundefined_f16m1(); } // CHECK-RV64-LABEL: @test_vundefined_f16m2( @@ -36,7 +36,7 @@ vfloat16m1_t test_vundefined_f16m1() { // CHECK-RV64-NEXT: ret poison // vfloat16m2_t test_vundefined_f16m2() { - return vundefined_f16m2(); + return __riscv_vundefined_f16m2(); } // CHECK-RV64-LABEL: @test_vundefined_f16m4( @@ -44,7 +44,7 @@ vfloat16m2_t test_vundefined_f16m2() { // CHECK-RV64-NEXT: ret poison // vfloat16m4_t test_vundefined_f16m4() { - return vundefined_f16m4(); + return __riscv_vundefined_f16m4(); } // CHECK-RV64-LABEL: @test_vundefined_f16m8( @@ -52,7 +52,7 @@ vfloat16m4_t test_vundefined_f16m4() { // CHECK-RV64-NEXT: ret poison // vfloat16m8_t test_vundefined_f16m8() { - return vundefined_f16m8(); + return __riscv_vundefined_f16m8(); } // CHECK-RV64-LABEL: @test_vundefined_f32mf2( @@ -60,7 +60,7 @@ vfloat16m8_t test_vundefined_f16m8() { // CHECK-RV64-NEXT: ret poison // vfloat32mf2_t test_vundefined_f32mf2() { - return vundefined_f32mf2(); + return __riscv_vundefined_f32mf2(); } // CHECK-RV64-LABEL: @test_vundefined_f32m1( @@ -68,7 +68,7 @@ vfloat32mf2_t test_vundefined_f32mf2() { // CHECK-RV64-NEXT: ret poison // vfloat32m1_t test_vundefined_f32m1() { - return vundefined_f32m1(); + return __riscv_vundefined_f32m1(); } // CHECK-RV64-LABEL: @test_vundefined_f32m2( @@ -76,7 +76,7 @@ vfloat32m1_t test_vundefined_f32m1() { // CHECK-RV64-NEXT: ret poison // vfloat32m2_t test_vundefined_f32m2() { - return vundefined_f32m2(); + return __riscv_vundefined_f32m2(); } // CHECK-RV64-LABEL: @test_vundefined_f32m4( @@ -84,7 +84,7 @@ vfloat32m2_t test_vundefined_f32m2() { // CHECK-RV64-NEXT: ret poison // 
vfloat32m4_t test_vundefined_f32m4() { - return vundefined_f32m4(); + return __riscv_vundefined_f32m4(); } // CHECK-RV64-LABEL: @test_vundefined_f32m8( @@ -92,7 +92,7 @@ vfloat32m4_t test_vundefined_f32m4() { // CHECK-RV64-NEXT: ret poison // vfloat32m8_t test_vundefined_f32m8() { - return vundefined_f32m8(); + return __riscv_vundefined_f32m8(); } // CHECK-RV64-LABEL: @test_vundefined_f64m1( @@ -100,7 +100,7 @@ vfloat32m8_t test_vundefined_f32m8() { // CHECK-RV64-NEXT: ret poison // vfloat64m1_t test_vundefined_f64m1() { - return vundefined_f64m1(); + return __riscv_vundefined_f64m1(); } // CHECK-RV64-LABEL: @test_vundefined_f64m2( @@ -108,7 +108,7 @@ vfloat64m1_t test_vundefined_f64m1() { // CHECK-RV64-NEXT: ret poison // vfloat64m2_t test_vundefined_f64m2() { - return vundefined_f64m2(); + return __riscv_vundefined_f64m2(); } // CHECK-RV64-LABEL: @test_vundefined_f64m4( @@ -116,7 +116,7 @@ vfloat64m2_t test_vundefined_f64m2() { // CHECK-RV64-NEXT: ret poison // vfloat64m4_t test_vundefined_f64m4() { - return vundefined_f64m4(); + return __riscv_vundefined_f64m4(); } // CHECK-RV64-LABEL: @test_vundefined_f64m8( @@ -124,7 +124,7 @@ vfloat64m4_t test_vundefined_f64m4() { // CHECK-RV64-NEXT: ret poison // vfloat64m8_t test_vundefined_f64m8() { - return vundefined_f64m8(); + return __riscv_vundefined_f64m8(); } // CHECK-RV64-LABEL: @test_vundefined_i8mf8( @@ -132,7 +132,7 @@ vfloat64m8_t test_vundefined_f64m8() { // CHECK-RV64-NEXT: ret poison // vint8mf8_t test_vundefined_i8mf8() { - return vundefined_i8mf8(); + return __riscv_vundefined_i8mf8(); } // CHECK-RV64-LABEL: @test_vundefined_i8mf4( @@ -140,7 +140,7 @@ vint8mf8_t test_vundefined_i8mf8() { // CHECK-RV64-NEXT: ret poison // vint8mf4_t test_vundefined_i8mf4() { - return vundefined_i8mf4(); + return __riscv_vundefined_i8mf4(); } // CHECK-RV64-LABEL: @test_vundefined_i8mf2( @@ -148,7 +148,7 @@ vint8mf4_t test_vundefined_i8mf4() { // CHECK-RV64-NEXT: ret poison // vint8mf2_t test_vundefined_i8mf2() { - return 
vundefined_i8mf2(); + return __riscv_vundefined_i8mf2(); } // CHECK-RV64-LABEL: @test_vundefined_i8m1( @@ -156,7 +156,7 @@ vint8mf2_t test_vundefined_i8mf2() { // CHECK-RV64-NEXT: ret poison // vint8m1_t test_vundefined_i8m1() { - return vundefined_i8m1(); + return __riscv_vundefined_i8m1(); } // CHECK-RV64-LABEL: @test_vundefined_i8m2( @@ -164,7 +164,7 @@ vint8m1_t test_vundefined_i8m1() { // CHECK-RV64-NEXT: ret poison // vint8m2_t test_vundefined_i8m2() { - return vundefined_i8m2(); + return __riscv_vundefined_i8m2(); } // CHECK-RV64-LABEL: @test_vundefined_i8m4( @@ -172,7 +172,7 @@ vint8m2_t test_vundefined_i8m2() { // CHECK-RV64-NEXT: ret poison // vint8m4_t test_vundefined_i8m4() { - return vundefined_i8m4(); + return __riscv_vundefined_i8m4(); } // CHECK-RV64-LABEL: @test_vundefined_i8m8( @@ -180,7 +180,7 @@ vint8m4_t test_vundefined_i8m4() { // CHECK-RV64-NEXT: ret poison // vint8m8_t test_vundefined_i8m8() { - return vundefined_i8m8(); + return __riscv_vundefined_i8m8(); } // CHECK-RV64-LABEL: @test_vundefined_i16mf4( @@ -188,7 +188,7 @@ vint8m8_t test_vundefined_i8m8() { // CHECK-RV64-NEXT: ret poison // vint16mf4_t test_vundefined_i16mf4() { - return vundefined_i16mf4(); + return __riscv_vundefined_i16mf4(); } // CHECK-RV64-LABEL: @test_vundefined_i16mf2( @@ -196,7 +196,7 @@ vint16mf4_t test_vundefined_i16mf4() { // CHECK-RV64-NEXT: ret poison // vint16mf2_t test_vundefined_i16mf2() { - return vundefined_i16mf2(); + return __riscv_vundefined_i16mf2(); } // CHECK-RV64-LABEL: @test_vundefined_i16m1( @@ -204,7 +204,7 @@ vint16mf2_t test_vundefined_i16mf2() { // CHECK-RV64-NEXT: ret poison // vint16m1_t test_vundefined_i16m1() { - return vundefined_i16m1(); + return __riscv_vundefined_i16m1(); } // CHECK-RV64-LABEL: @test_vundefined_i16m2( @@ -212,7 +212,7 @@ vint16m1_t test_vundefined_i16m1() { // CHECK-RV64-NEXT: ret poison // vint16m2_t test_vundefined_i16m2() { - return vundefined_i16m2(); + return __riscv_vundefined_i16m2(); } // CHECK-RV64-LABEL: 
@test_vundefined_i16m4( @@ -220,7 +220,7 @@ vint16m2_t test_vundefined_i16m2() { // CHECK-RV64-NEXT: ret poison // vint16m4_t test_vundefined_i16m4() { - return vundefined_i16m4(); + return __riscv_vundefined_i16m4(); } // CHECK-RV64-LABEL: @test_vundefined_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vundefined_i16m4() { // CHECK-RV64-NEXT: ret poison // vint16m8_t test_vundefined_i16m8() { - return vundefined_i16m8(); + return __riscv_vundefined_i16m8(); } // CHECK-RV64-LABEL: @test_vundefined_i32mf2( @@ -236,7 +236,7 @@ vint16m8_t test_vundefined_i16m8() { // CHECK-RV64-NEXT: ret poison // vint32mf2_t test_vundefined_i32mf2() { - return vundefined_i32mf2(); + return __riscv_vundefined_i32mf2(); } // CHECK-RV64-LABEL: @test_vundefined_i32m1( @@ -244,7 +244,7 @@ vint32mf2_t test_vundefined_i32mf2() { // CHECK-RV64-NEXT: ret poison // vint32m1_t test_vundefined_i32m1() { - return vundefined_i32m1(); + return __riscv_vundefined_i32m1(); } // CHECK-RV64-LABEL: @test_vundefined_i32m2( @@ -252,7 +252,7 @@ vint32m1_t test_vundefined_i32m1() { // CHECK-RV64-NEXT: ret poison // vint32m2_t test_vundefined_i32m2() { - return vundefined_i32m2(); + return __riscv_vundefined_i32m2(); } // CHECK-RV64-LABEL: @test_vundefined_i32m4( @@ -260,7 +260,7 @@ vint32m2_t test_vundefined_i32m2() { // CHECK-RV64-NEXT: ret poison // vint32m4_t test_vundefined_i32m4() { - return vundefined_i32m4(); + return __riscv_vundefined_i32m4(); } // CHECK-RV64-LABEL: @test_vundefined_i32m8( @@ -268,7 +268,7 @@ vint32m4_t test_vundefined_i32m4() { // CHECK-RV64-NEXT: ret poison // vint32m8_t test_vundefined_i32m8() { - return vundefined_i32m8(); + return __riscv_vundefined_i32m8(); } // CHECK-RV64-LABEL: @test_vundefined_i64m1( @@ -276,7 +276,7 @@ vint32m8_t test_vundefined_i32m8() { // CHECK-RV64-NEXT: ret poison // vint64m1_t test_vundefined_i64m1() { - return vundefined_i64m1(); + return __riscv_vundefined_i64m1(); } // CHECK-RV64-LABEL: @test_vundefined_i64m2( @@ -284,7 +284,7 @@ vint64m1_t 
test_vundefined_i64m1() { // CHECK-RV64-NEXT: ret poison // vint64m2_t test_vundefined_i64m2() { - return vundefined_i64m2(); + return __riscv_vundefined_i64m2(); } // CHECK-RV64-LABEL: @test_vundefined_i64m4( @@ -292,7 +292,7 @@ vint64m2_t test_vundefined_i64m2() { // CHECK-RV64-NEXT: ret poison // vint64m4_t test_vundefined_i64m4() { - return vundefined_i64m4(); + return __riscv_vundefined_i64m4(); } // CHECK-RV64-LABEL: @test_vundefined_i64m8( @@ -300,7 +300,7 @@ vint64m4_t test_vundefined_i64m4() { // CHECK-RV64-NEXT: ret poison // vint64m8_t test_vundefined_i64m8() { - return vundefined_i64m8(); + return __riscv_vundefined_i64m8(); } // CHECK-RV64-LABEL: @test_vundefined_u8mf8( @@ -308,7 +308,7 @@ vint64m8_t test_vundefined_i64m8() { // CHECK-RV64-NEXT: ret poison // vuint8mf8_t test_vundefined_u8mf8() { - return vundefined_u8mf8(); + return __riscv_vundefined_u8mf8(); } // CHECK-RV64-LABEL: @test_vundefined_u8mf4( @@ -316,7 +316,7 @@ vuint8mf8_t test_vundefined_u8mf8() { // CHECK-RV64-NEXT: ret poison // vuint8mf4_t test_vundefined_u8mf4() { - return vundefined_u8mf4(); + return __riscv_vundefined_u8mf4(); } // CHECK-RV64-LABEL: @test_vundefined_u8mf2( @@ -324,7 +324,7 @@ vuint8mf4_t test_vundefined_u8mf4() { // CHECK-RV64-NEXT: ret poison // vuint8mf2_t test_vundefined_u8mf2() { - return vundefined_u8mf2(); + return __riscv_vundefined_u8mf2(); } // CHECK-RV64-LABEL: @test_vundefined_u8m1( @@ -332,7 +332,7 @@ vuint8mf2_t test_vundefined_u8mf2() { // CHECK-RV64-NEXT: ret poison // vuint8m1_t test_vundefined_u8m1() { - return vundefined_u8m1(); + return __riscv_vundefined_u8m1(); } // CHECK-RV64-LABEL: @test_vundefined_u8m2( @@ -340,7 +340,7 @@ vuint8m1_t test_vundefined_u8m1() { // CHECK-RV64-NEXT: ret poison // vuint8m2_t test_vundefined_u8m2() { - return vundefined_u8m2(); + return __riscv_vundefined_u8m2(); } // CHECK-RV64-LABEL: @test_vundefined_u8m4( @@ -348,7 +348,7 @@ vuint8m2_t test_vundefined_u8m2() { // CHECK-RV64-NEXT: ret poison // vuint8m4_t 
test_vundefined_u8m4() { - return vundefined_u8m4(); + return __riscv_vundefined_u8m4(); } // CHECK-RV64-LABEL: @test_vundefined_u8m8( @@ -356,7 +356,7 @@ vuint8m4_t test_vundefined_u8m4() { // CHECK-RV64-NEXT: ret poison // vuint8m8_t test_vundefined_u8m8() { - return vundefined_u8m8(); + return __riscv_vundefined_u8m8(); } // CHECK-RV64-LABEL: @test_vundefined_u16mf4( @@ -364,7 +364,7 @@ vuint8m8_t test_vundefined_u8m8() { // CHECK-RV64-NEXT: ret poison // vuint16mf4_t test_vundefined_u16mf4() { - return vundefined_u16mf4(); + return __riscv_vundefined_u16mf4(); } // CHECK-RV64-LABEL: @test_vundefined_u16mf2( @@ -372,7 +372,7 @@ vuint16mf4_t test_vundefined_u16mf4() { // CHECK-RV64-NEXT: ret poison // vuint16mf2_t test_vundefined_u16mf2() { - return vundefined_u16mf2(); + return __riscv_vundefined_u16mf2(); } // CHECK-RV64-LABEL: @test_vundefined_u16m1( @@ -380,7 +380,7 @@ vuint16mf2_t test_vundefined_u16mf2() { // CHECK-RV64-NEXT: ret poison // vuint16m1_t test_vundefined_u16m1() { - return vundefined_u16m1(); + return __riscv_vundefined_u16m1(); } // CHECK-RV64-LABEL: @test_vundefined_u16m2( @@ -388,7 +388,7 @@ vuint16m1_t test_vundefined_u16m1() { // CHECK-RV64-NEXT: ret poison // vuint16m2_t test_vundefined_u16m2() { - return vundefined_u16m2(); + return __riscv_vundefined_u16m2(); } // CHECK-RV64-LABEL: @test_vundefined_u16m4( @@ -396,7 +396,7 @@ vuint16m2_t test_vundefined_u16m2() { // CHECK-RV64-NEXT: ret poison // vuint16m4_t test_vundefined_u16m4() { - return vundefined_u16m4(); + return __riscv_vundefined_u16m4(); } // CHECK-RV64-LABEL: @test_vundefined_u16m8( @@ -404,7 +404,7 @@ vuint16m4_t test_vundefined_u16m4() { // CHECK-RV64-NEXT: ret poison // vuint16m8_t test_vundefined_u16m8() { - return vundefined_u16m8(); + return __riscv_vundefined_u16m8(); } // CHECK-RV64-LABEL: @test_vundefined_u32mf2( @@ -412,7 +412,7 @@ vuint16m8_t test_vundefined_u16m8() { // CHECK-RV64-NEXT: ret poison // vuint32mf2_t test_vundefined_u32mf2() { - return 
vundefined_u32mf2(); + return __riscv_vundefined_u32mf2(); } // CHECK-RV64-LABEL: @test_vundefined_u32m1( @@ -420,7 +420,7 @@ vuint32mf2_t test_vundefined_u32mf2() { // CHECK-RV64-NEXT: ret poison // vuint32m1_t test_vundefined_u32m1() { - return vundefined_u32m1(); + return __riscv_vundefined_u32m1(); } // CHECK-RV64-LABEL: @test_vundefined_u32m2( @@ -428,7 +428,7 @@ vuint32m1_t test_vundefined_u32m1() { // CHECK-RV64-NEXT: ret poison // vuint32m2_t test_vundefined_u32m2() { - return vundefined_u32m2(); + return __riscv_vundefined_u32m2(); } // CHECK-RV64-LABEL: @test_vundefined_u32m4( @@ -436,7 +436,7 @@ vuint32m2_t test_vundefined_u32m2() { // CHECK-RV64-NEXT: ret poison // vuint32m4_t test_vundefined_u32m4() { - return vundefined_u32m4(); + return __riscv_vundefined_u32m4(); } // CHECK-RV64-LABEL: @test_vundefined_u32m8( @@ -444,7 +444,7 @@ vuint32m4_t test_vundefined_u32m4() { // CHECK-RV64-NEXT: ret poison // vuint32m8_t test_vundefined_u32m8() { - return vundefined_u32m8(); + return __riscv_vundefined_u32m8(); } // CHECK-RV64-LABEL: @test_vundefined_u64m1( @@ -452,7 +452,7 @@ vuint32m8_t test_vundefined_u32m8() { // CHECK-RV64-NEXT: ret poison // vuint64m1_t test_vundefined_u64m1() { - return vundefined_u64m1(); + return __riscv_vundefined_u64m1(); } // CHECK-RV64-LABEL: @test_vundefined_u64m2( @@ -460,7 +460,7 @@ vuint64m1_t test_vundefined_u64m1() { // CHECK-RV64-NEXT: ret poison // vuint64m2_t test_vundefined_u64m2() { - return vundefined_u64m2(); + return __riscv_vundefined_u64m2(); } // CHECK-RV64-LABEL: @test_vundefined_u64m4( @@ -468,7 +468,7 @@ vuint64m2_t test_vundefined_u64m2() { // CHECK-RV64-NEXT: ret poison // vuint64m4_t test_vundefined_u64m4() { - return vundefined_u64m4(); + return __riscv_vundefined_u64m4(); } // CHECK-RV64-LABEL: @test_vundefined_u64m8( @@ -476,6 +476,6 @@ vuint64m4_t test_vundefined_u64m4() { // CHECK-RV64-NEXT: ret poison // vuint64m8_t test_vundefined_u64m8() { - return vundefined_u64m8(); + return 
__riscv_vundefined_u64m8(); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwadd.c index 367e6e859ec2..07593304977c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwadd.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_vv_i16mf4(op1, op2, vl); + return __riscv_vwadd_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4( @@ -21,7 +21,7 @@ vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf4(op1, op2, vl); + return __riscv_vwadd_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4( @@ -30,7 +30,7 @@ vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_wv_i16mf4(op1, op2, vl); + return __riscv_vwadd_wv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4( @@ -39,7 +39,7 @@ vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf4(op1, op2, vl); + return __riscv_vwadd_wx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2( @@ -48,7 +48,7 @@ vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_vv_i16mf2(op1, op2, vl); + return 
__riscv_vwadd_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2( @@ -57,7 +57,7 @@ vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf2(op1, op2, vl); + return __riscv_vwadd_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2( @@ -66,7 +66,7 @@ vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_wv_i16mf2(op1, op2, vl); + return __riscv_vwadd_wv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2( @@ -75,7 +75,7 @@ vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf2(op1, op2, vl); + return __riscv_vwadd_wx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1( @@ -84,7 +84,7 @@ vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_vv_i16m1(op1, op2, vl); + return __riscv_vwadd_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1( @@ -93,7 +93,7 @@ vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m1(op1, op2, vl); + return __riscv_vwadd_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1( @@ -102,7 +102,7 @@ vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return 
vwadd_wv_i16m1(op1, op2, vl); + return __riscv_vwadd_wv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1( @@ -111,7 +111,7 @@ vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m1(op1, op2, vl); + return __riscv_vwadd_wx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2( @@ -120,7 +120,7 @@ vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwadd_vv_i16m2(op1, op2, vl); + return __riscv_vwadd_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2( @@ -129,7 +129,7 @@ vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m2(op1, op2, vl); + return __riscv_vwadd_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2( @@ -138,7 +138,7 @@ vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwadd_wv_i16m2(op1, op2, vl); + return __riscv_vwadd_wv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2( @@ -147,7 +147,7 @@ vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m2(op1, op2, vl); + return __riscv_vwadd_wx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4( @@ -156,7 +156,7 @@ vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return 
vwadd_vv_i16m4(op1, op2, vl); + return __riscv_vwadd_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4( @@ -165,7 +165,7 @@ vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m4(op1, op2, vl); + return __riscv_vwadd_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4( @@ -174,7 +174,7 @@ vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwadd_wv_i16m4(op1, op2, vl); + return __riscv_vwadd_wv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4( @@ -183,7 +183,7 @@ vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m4(op1, op2, vl); + return __riscv_vwadd_wx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8( @@ -192,7 +192,7 @@ vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwadd_vv_i16m8(op1, op2, vl); + return __riscv_vwadd_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8( @@ -201,7 +201,7 @@ vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m8(op1, op2, vl); + return __riscv_vwadd_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8( @@ -210,7 +210,7 @@ vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { - return 
vwadd_wv_i16m8(op1, op2, vl); + return __riscv_vwadd_wv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8( @@ -219,7 +219,7 @@ vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m8(op1, op2, vl); + return __riscv_vwadd_wx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2( @@ -228,7 +228,7 @@ vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_vv_i32mf2(op1, op2, vl); + return __riscv_vwadd_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2( @@ -237,7 +237,7 @@ vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32mf2(op1, op2, vl); + return __riscv_vwadd_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2( @@ -246,7 +246,7 @@ vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_wv_i32mf2(op1, op2, vl); + return __riscv_vwadd_wv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32mf2(op1, op2, vl); + return __riscv_vwadd_wx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_vv_i32m1(op1, op2, vl); + return __riscv_vwadd_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m1(op1, op2, vl); + return __riscv_vwadd_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1( @@ -282,7 +282,7 @@ vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_wv_i32m1(op1, op2, vl); + return __riscv_vwadd_wv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1( @@ -291,7 +291,7 @@ vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m1(op1, op2, vl); + return __riscv_vwadd_wx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2( @@ -300,7 +300,7 @@ vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwadd_vv_i32m2(op1, op2, vl); + return __riscv_vwadd_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2( @@ -309,7 +309,7 @@ vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m2(op1, op2, vl); + return __riscv_vwadd_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2( @@ -318,7 +318,7 @@ vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwadd_wv_i32m2(op1, op2, vl); + return __riscv_vwadd_wv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2( @@ -327,7 +327,7 @@ vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m2(op1, op2, vl); + return __riscv_vwadd_wx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4( @@ -336,7 +336,7 @@ vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwadd_vv_i32m4(op1, op2, vl); + return __riscv_vwadd_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4( @@ -345,7 +345,7 @@ vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m4(op1, op2, vl); + return __riscv_vwadd_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4( @@ -354,7 +354,7 @@ vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwadd_wv_i32m4(op1, op2, vl); + return __riscv_vwadd_wv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4( @@ -363,7 +363,7 @@ vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m4(op1, op2, vl); + return __riscv_vwadd_wx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8( @@ -372,7 +372,7 @@ vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwadd_vv_i32m8(op1, op2, vl); + return __riscv_vwadd_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8( @@ -381,7 +381,7 @@ vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m8(op1, op2, vl); + return __riscv_vwadd_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8( @@ -390,7 +390,7 @@ vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwadd_wv_i32m8(op1, op2, vl); + return __riscv_vwadd_wv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8( @@ -399,7 +399,7 @@ vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m8(op1, op2, vl); + return __riscv_vwadd_wx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1( @@ -408,7 +408,7 @@ vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_vv_i64m1(op1, op2, vl); + return __riscv_vwadd_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1( @@ -417,7 +417,7 @@ vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m1(op1, op2, vl); + return __riscv_vwadd_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1( @@ -426,7 +426,7 @@ vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, 
int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_wv_i64m1(op1, op2, vl); + return __riscv_vwadd_wv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1( @@ -435,7 +435,7 @@ vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m1(op1, op2, vl); + return __riscv_vwadd_wx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2( @@ -444,7 +444,7 @@ vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwadd_vv_i64m2(op1, op2, vl); + return __riscv_vwadd_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2( @@ -453,7 +453,7 @@ vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m2(op1, op2, vl); + return __riscv_vwadd_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2( @@ -462,7 +462,7 @@ vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwadd_wv_i64m2(op1, op2, vl); + return __riscv_vwadd_wv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2( @@ -471,7 +471,7 @@ vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m2(op1, op2, vl); + return __riscv_vwadd_wx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4( @@ -480,7 +480,7 @@ vint64m2_t 
test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwadd_vv_i64m4(op1, op2, vl); + return __riscv_vwadd_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4( @@ -489,7 +489,7 @@ vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m4(op1, op2, vl); + return __riscv_vwadd_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4( @@ -498,7 +498,7 @@ vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwadd_wv_i64m4(op1, op2, vl); + return __riscv_vwadd_wv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4( @@ -507,7 +507,7 @@ vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m4(op1, op2, vl); + return __riscv_vwadd_wx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8( @@ -516,7 +516,7 @@ vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwadd_vv_i64m8(op1, op2, vl); + return __riscv_vwadd_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8( @@ -525,7 +525,7 @@ vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m8(op1, op2, vl); + return __riscv_vwadd_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8( @@ -534,7 
+534,7 @@ vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwadd_wv_i64m8(op1, op2, vl); + return __riscv_vwadd_wv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8( @@ -543,7 +543,7 @@ vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m8(op1, op2, vl); + return __riscv_vwadd_wx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m( @@ -552,7 +552,7 @@ vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m( @@ -561,7 +561,7 @@ vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m( @@ -570,7 +570,7 @@ vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_wv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m( @@ -579,7 +579,7 @@ vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, 
vint16mf4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m( @@ -588,7 +588,7 @@ vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m( @@ -597,7 +597,7 @@ vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m( @@ -606,7 +606,7 @@ vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_wv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m( @@ -615,7 +615,7 @@ vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m( @@ -624,7 +624,7 @@ vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_vv_i16m1_m(mask, op1, op2, 
vl); + return __riscv_vwadd_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m( @@ -633,7 +633,7 @@ vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m( @@ -642,7 +642,7 @@ vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_wv_i16m1_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m( @@ -651,7 +651,7 @@ vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m1_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m( @@ -660,7 +660,7 @@ vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwadd_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m( @@ -669,7 +669,7 @@ vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m( @@ -678,7 
+678,7 @@ vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwadd_wv_i16m2_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m( @@ -687,7 +687,7 @@ vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m2_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m( @@ -696,7 +696,7 @@ vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwadd_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m( @@ -705,7 +705,7 @@ vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m( @@ -714,7 +714,7 @@ vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwadd_wv_i16m4_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m( @@ -723,7 +723,7 @@ vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, s // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m4_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m( @@ -732,7 +732,7 @@ vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwadd_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m( @@ -741,7 +741,7 @@ vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m( @@ -750,7 +750,7 @@ vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwadd_wv_i16m8_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m( @@ -759,7 +759,7 @@ vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m8_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m( @@ -768,7 +768,7 @@ vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return 
vwadd_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m( @@ -777,7 +777,7 @@ vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m( @@ -786,7 +786,7 @@ vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_wv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m( @@ -795,7 +795,7 @@ vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m( @@ -804,7 +804,7 @@ vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m( @@ -813,7 +813,7 @@ vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i32m1_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m( @@ -822,7 +822,7 @@ vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_wv_i32m1_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m( @@ -831,7 +831,7 @@ vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m1_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m( @@ -840,7 +840,7 @@ vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwadd_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m( @@ -849,7 +849,7 @@ vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m( @@ -858,7 +858,7 @@ vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwadd_wv_i32m2_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m( @@ -867,7 +867,7 @@ vint32m2_t 
test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m2_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m( @@ -876,7 +876,7 @@ vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwadd_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m( @@ -885,7 +885,7 @@ vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m( @@ -894,7 +894,7 @@ vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwadd_wv_i32m4_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m( @@ -903,7 +903,7 @@ vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m4_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m( @@ -912,7 +912,7 @@ vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwadd_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m( @@ -921,7 +921,7 @@ vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m( @@ -930,7 +930,7 @@ vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwadd_wv_i32m8_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m( @@ -939,7 +939,7 @@ vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m8_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m( @@ -948,7 +948,7 @@ vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m( @@ -957,7 +957,7 @@ vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return 
vwadd_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m( @@ -966,7 +966,7 @@ vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_wv_i64m1_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m( @@ -975,7 +975,7 @@ vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m1_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m( @@ -984,7 +984,7 @@ vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwadd_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m( @@ -993,7 +993,7 @@ vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m( @@ -1002,7 +1002,7 @@ vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwadd_wv_i64m2_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i64m2_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m( @@ -1011,7 +1011,7 @@ vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m2_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m( @@ -1020,7 +1020,7 @@ vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwadd_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m( @@ -1029,7 +1029,7 @@ vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m( @@ -1038,7 +1038,7 @@ vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwadd_wv_i64m4_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m( @@ -1047,7 +1047,7 @@ vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m4_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m( @@ -1056,7 +1056,7 @@ vint64m4_t 
test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwadd_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vwadd_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m( @@ -1065,7 +1065,7 @@ vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vwadd_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m( @@ -1074,7 +1074,7 @@ vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwadd_wv_i64m8_m(mask, op1, op2, vl); + return __riscv_vwadd_wv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m( @@ -1083,6 +1083,6 @@ vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m8_m(mask, op1, op2, vl); + return __riscv_vwadd_wx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwaddu.c index 52d28dffeafb..ddbd0390f50f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwaddu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - 
return vwaddu_vv_u16mf4(op1, op2, vl); + return __riscv_vwaddu_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4( @@ -21,7 +21,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16mf4(op1, op2, vl); + return __riscv_vwaddu_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4( @@ -30,7 +30,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_wv_u16mf4(op1, op2, vl); + return __riscv_vwaddu_wv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4( @@ -39,7 +39,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf4(op1, op2, vl); + return __riscv_vwaddu_wx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2( @@ -48,7 +48,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_vv_u16mf2(op1, op2, vl); + return __riscv_vwaddu_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2( @@ -57,7 +57,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16mf2(op1, op2, vl); + return __riscv_vwaddu_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2( @@ -66,7 +66,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_wv_u16mf2(op1, op2, vl); + return __riscv_vwaddu_wv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2( @@ -75,7 +75,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf2(op1, op2, vl); + return __riscv_vwaddu_wx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1( @@ -84,7 +84,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_vv_u16m1(op1, op2, vl); + return __riscv_vwaddu_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1( @@ -93,7 +93,7 @@ vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m1(op1, op2, vl); + return __riscv_vwaddu_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1( @@ -102,7 +102,7 @@ vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_wv_u16m1(op1, op2, vl); + return __riscv_vwaddu_wv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1( @@ -111,7 +111,7 @@ vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m1(op1, op2, vl); + return __riscv_vwaddu_wx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2( @@ -120,7 
+120,7 @@ vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_vv_u16m2(op1, op2, vl); + return __riscv_vwaddu_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2( @@ -129,7 +129,7 @@ vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m2(op1, op2, vl); + return __riscv_vwaddu_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2( @@ -138,7 +138,7 @@ vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_wv_u16m2(op1, op2, vl); + return __riscv_vwaddu_wv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2( @@ -147,7 +147,7 @@ vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m2(op1, op2, vl); + return __riscv_vwaddu_wx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4( @@ -156,7 +156,7 @@ vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_vv_u16m4(op1, op2, vl); + return __riscv_vwaddu_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4( @@ -165,7 +165,7 @@ vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m4(op1, op2, vl); + return __riscv_vwaddu_vx_u16m4(op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4( @@ -174,7 +174,7 @@ vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_wv_u16m4(op1, op2, vl); + return __riscv_vwaddu_wv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4( @@ -183,7 +183,7 @@ vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m4(op1, op2, vl); + return __riscv_vwaddu_wx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8( @@ -192,7 +192,7 @@ vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_vv_u16m8(op1, op2, vl); + return __riscv_vwaddu_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8( @@ -201,7 +201,7 @@ vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m8(op1, op2, vl); + return __riscv_vwaddu_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8( @@ -210,7 +210,7 @@ vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_wv_u16m8(op1, op2, vl); + return __riscv_vwaddu_wv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8( @@ -219,7 +219,7 @@ vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { - return 
vwaddu_wx_u16m8(op1, op2, vl); + return __riscv_vwaddu_wx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2( @@ -228,7 +228,7 @@ vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_vv_u32mf2(op1, op2, vl); + return __riscv_vwaddu_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2( @@ -237,7 +237,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32mf2(op1, op2, vl); + return __riscv_vwaddu_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2( @@ -246,7 +246,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_wv_u32mf2(op1, op2, vl); + return __riscv_vwaddu_wv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32mf2(op1, op2, vl); + return __riscv_vwaddu_wx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_vv_u32m1(op1, op2, vl); + return __riscv_vwaddu_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m1(op1, op2, vl); + return __riscv_vwaddu_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1( @@ -282,7 +282,7 @@ vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_wv_u32m1(op1, op2, vl); + return __riscv_vwaddu_wv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1( @@ -291,7 +291,7 @@ vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m1(op1, op2, vl); + return __riscv_vwaddu_wx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2( @@ -300,7 +300,7 @@ vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_vv_u32m2(op1, op2, vl); + return __riscv_vwaddu_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2( @@ -309,7 +309,7 @@ vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m2(op1, op2, vl); + return __riscv_vwaddu_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2( @@ -318,7 +318,7 @@ vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_wv_u32m2(op1, op2, vl); + return __riscv_vwaddu_wv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2( @@ -327,7 +327,7 
@@ vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m2(op1, op2, vl); + return __riscv_vwaddu_wx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4( @@ -336,7 +336,7 @@ vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_vv_u32m4(op1, op2, vl); + return __riscv_vwaddu_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4( @@ -345,7 +345,7 @@ vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m4(op1, op2, vl); + return __riscv_vwaddu_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4( @@ -354,7 +354,7 @@ vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_wv_u32m4(op1, op2, vl); + return __riscv_vwaddu_wv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4( @@ -363,7 +363,7 @@ vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m4(op1, op2, vl); + return __riscv_vwaddu_wx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8( @@ -372,7 +372,7 @@ vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_vv_u32m8(op1, op2, vl); + return 
__riscv_vwaddu_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8( @@ -381,7 +381,7 @@ vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m8(op1, op2, vl); + return __riscv_vwaddu_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8( @@ -390,7 +390,7 @@ vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_wv_u32m8(op1, op2, vl); + return __riscv_vwaddu_wv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8( @@ -399,7 +399,7 @@ vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m8(op1, op2, vl); + return __riscv_vwaddu_wx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1( @@ -408,7 +408,7 @@ vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_vv_u64m1(op1, op2, vl); + return __riscv_vwaddu_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1( @@ -417,7 +417,7 @@ vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m1(op1, op2, vl); + return __riscv_vwaddu_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1( @@ -426,7 +426,7 @@ vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_wv_u64m1(op1, op2, vl); + return __riscv_vwaddu_wv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1( @@ -435,7 +435,7 @@ vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m1(op1, op2, vl); + return __riscv_vwaddu_wx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2( @@ -444,7 +444,7 @@ vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_vv_u64m2(op1, op2, vl); + return __riscv_vwaddu_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2( @@ -453,7 +453,7 @@ vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m2(op1, op2, vl); + return __riscv_vwaddu_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2( @@ -462,7 +462,7 @@ vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_wv_u64m2(op1, op2, vl); + return __riscv_vwaddu_wv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2( @@ -471,7 +471,7 @@ vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m2(op1, op2, vl); + return __riscv_vwaddu_wx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4( @@ -480,7 +480,7 @@ vuint64m2_t 
test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_vv_u64m4(op1, op2, vl); + return __riscv_vwaddu_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4( @@ -489,7 +489,7 @@ vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m4(op1, op2, vl); + return __riscv_vwaddu_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4( @@ -498,7 +498,7 @@ vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_wv_u64m4(op1, op2, vl); + return __riscv_vwaddu_wv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4( @@ -507,7 +507,7 @@ vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m4(op1, op2, vl); + return __riscv_vwaddu_wx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8( @@ -516,7 +516,7 @@ vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_vv_u64m8(op1, op2, vl); + return __riscv_vwaddu_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8( @@ -525,7 +525,7 @@ vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m8(op1, op2, vl); + return __riscv_vwaddu_vx_u64m8(op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8( @@ -534,7 +534,7 @@ vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_wv_u64m8(op1, op2, vl); + return __riscv_vwaddu_wv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8( @@ -543,7 +543,7 @@ vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m8(op1, op2, vl); + return __riscv_vwaddu_wx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m( @@ -552,7 +552,7 @@ vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m( @@ -561,7 +561,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m( @@ -570,7 +570,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_wv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m( @@ -579,7 +579,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, 
vuint16mf4_t op1, vuint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m( @@ -588,7 +588,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m( @@ -597,7 +597,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m( @@ -606,7 +606,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_wv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m( @@ -615,7 +615,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m( @@ -624,7 +624,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t o // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m( @@ -633,7 +633,7 @@ vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m( @@ -642,7 +642,7 @@ vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_wv_u16m1_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m( @@ -651,7 +651,7 @@ vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m1_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m( @@ -660,7 +660,7 @@ vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m( @@ -669,7 +669,7 @@ vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint8m1_t 
op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m( @@ -678,7 +678,7 @@ vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_wv_u16m2_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m( @@ -687,7 +687,7 @@ vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m2_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m( @@ -696,7 +696,7 @@ vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m( @@ -705,7 +705,7 @@ vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m( @@ -714,7 +714,7 @@ vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_wv_u16m4_m(mask, op1, op2, vl); + 
return __riscv_vwaddu_wv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m( @@ -723,7 +723,7 @@ vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m4_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m( @@ -732,7 +732,7 @@ vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m( @@ -741,7 +741,7 @@ vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m( @@ -750,7 +750,7 @@ vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_wv_u16m8_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m( @@ -759,7 +759,7 @@ vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m8_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwaddu_vv_u32mf2_m( @@ -768,7 +768,7 @@ vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m( @@ -777,7 +777,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m( @@ -786,7 +786,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_wv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m( @@ -795,7 +795,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m( @@ -804,7 +804,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m( @@ -813,7 +813,7 @@ 
vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m( @@ -822,7 +822,7 @@ vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_wv_u32m1_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m( @@ -831,7 +831,7 @@ vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m1_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m( @@ -840,7 +840,7 @@ vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m( @@ -849,7 +849,7 @@ vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m( @@ -858,7 +858,7 @@ vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, 
uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_wv_u32m2_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m( @@ -867,7 +867,7 @@ vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m2_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m( @@ -876,7 +876,7 @@ vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m( @@ -885,7 +885,7 @@ vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m( @@ -894,7 +894,7 @@ vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_wv_u32m4_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m( @@ -903,7 +903,7 @@ vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m4_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m( @@ -912,7 +912,7 @@ vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m( @@ -921,7 +921,7 @@ vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m( @@ -930,7 +930,7 @@ vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_wv_u32m8_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m( @@ -939,7 +939,7 @@ vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m8_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m( @@ -948,7 +948,7 @@ vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, 
size_t vl) { - return vwaddu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m( @@ -957,7 +957,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m( @@ -966,7 +966,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_wv_u64m1_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m( @@ -975,7 +975,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m1_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m( @@ -984,7 +984,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m( @@ -993,7 +993,7 @@ vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m2_m(mask, op1, op2, vl); + return 
__riscv_vwaddu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m( @@ -1002,7 +1002,7 @@ vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_wv_u64m2_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m( @@ -1011,7 +1011,7 @@ vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m2_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m( @@ -1020,7 +1020,7 @@ vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m( @@ -1029,7 +1029,7 @@ vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m( @@ -1038,7 +1038,7 @@ vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_wv_u64m4_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u64m4_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m( @@ -1047,7 +1047,7 @@ vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m4_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m( @@ -1056,7 +1056,7 @@ vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vwaddu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m( @@ -1065,7 +1065,7 @@ vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vwaddu_vx_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m( @@ -1074,7 +1074,7 @@ vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_wv_u64m8_m(mask, op1, op2, vl); + return __riscv_vwaddu_wv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m( @@ -1083,6 +1083,6 @@ vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m8_m(mask, op1, op2, vl); + return __riscv_vwaddu_wx_u64m8_m(mask, op1, op2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvt.c index 41180bb393fb..8495be0f20d6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvt.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4(vint8mf8_t src, size_t vl) { - return vwcvt_x_x_v_i16mf4(src, vl); + return __riscv_vwcvt_x_x_v_i16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2( @@ -21,7 +21,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4(vint8mf8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2(vint8mf4_t src, size_t vl) { - return vwcvt_x_x_v_i16mf2(src, vl); + return __riscv_vwcvt_x_x_v_i16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1( @@ -30,7 +30,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2(vint8mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1(vint8mf2_t src, size_t vl) { - return vwcvt_x_x_v_i16m1(src, vl); + return __riscv_vwcvt_x_x_v_i16m1(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2( @@ -39,7 +39,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1(vint8mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2(vint8m1_t src, size_t vl) { - return vwcvt_x_x_v_i16m2(src, vl); + return __riscv_vwcvt_x_x_v_i16m2(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4( @@ -48,7 +48,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2(vint8m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4(vint8m2_t src, size_t vl) { - return vwcvt_x_x_v_i16m4(src, vl); + return __riscv_vwcvt_x_x_v_i16m4(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8( @@ -57,7 +57,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4(vint8m2_t src, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8(vint8m4_t src, size_t vl) { - return vwcvt_x_x_v_i16m8(src, vl); + return __riscv_vwcvt_x_x_v_i16m8(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2( @@ -66,7 +66,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8(vint8m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2(vint16mf4_t src, size_t vl) { - return vwcvt_x_x_v_i32mf2(src, vl); + return __riscv_vwcvt_x_x_v_i32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1( @@ -75,7 +75,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2(vint16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1(vint16mf2_t src, size_t vl) { - return vwcvt_x_x_v_i32m1(src, vl); + return __riscv_vwcvt_x_x_v_i32m1(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2( @@ -84,7 +84,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1(vint16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2(vint16m1_t src, size_t vl) { - return vwcvt_x_x_v_i32m2(src, vl); + return __riscv_vwcvt_x_x_v_i32m2(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4( @@ -93,7 +93,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2(vint16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4(vint16m2_t src, size_t vl) { - return vwcvt_x_x_v_i32m4(src, vl); + return __riscv_vwcvt_x_x_v_i32m4(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8( @@ -102,7 +102,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4(vint16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8(vint16m4_t src, size_t vl) { - return vwcvt_x_x_v_i32m8(src, vl); + return __riscv_vwcvt_x_x_v_i32m8(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1( @@ -111,7 +111,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8(vint16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1(vint32mf2_t src, size_t vl) { - return 
vwcvt_x_x_v_i64m1(src, vl); + return __riscv_vwcvt_x_x_v_i64m1(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2( @@ -120,7 +120,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1(vint32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2(vint32m1_t src, size_t vl) { - return vwcvt_x_x_v_i64m2(src, vl); + return __riscv_vwcvt_x_x_v_i64m2(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4( @@ -129,7 +129,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2(vint32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4(vint32m2_t src, size_t vl) { - return vwcvt_x_x_v_i64m4(src, vl); + return __riscv_vwcvt_x_x_v_i64m4(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8( @@ -138,7 +138,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4(vint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8(vint32m4_t src, size_t vl) { - return vwcvt_x_x_v_i64m8(src, vl); + return __riscv_vwcvt_x_x_v_i64m8(src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_m( @@ -147,7 +147,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8(vint32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl) { - return vwcvt_x_x_v_i16mf4_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i16mf4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m( @@ -156,7 +156,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl) { - return vwcvt_x_x_v_i16mf2_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m( @@ -165,7 +165,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) 
{ - return vwcvt_x_x_v_i16m1_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m( @@ -174,7 +174,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) { - return vwcvt_x_x_v_i16m2_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m( @@ -183,7 +183,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) { - return vwcvt_x_x_v_i16m4_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m( @@ -192,7 +192,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) { - return vwcvt_x_x_v_i16m8_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m( @@ -201,7 +201,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl) { - return vwcvt_x_x_v_i32mf2_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m( @@ -210,7 +210,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl) { - return vwcvt_x_x_v_i32m1_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m( @@ -219,7 +219,7 @@ 
vint32m1_t test_vwcvt_x_x_v_i32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) { - return vwcvt_x_x_v_i32m2_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m( @@ -228,7 +228,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) { - return vwcvt_x_x_v_i32m4_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m( @@ -237,7 +237,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) { - return vwcvt_x_x_v_i32m8_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m( @@ -246,7 +246,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl) { - return vwcvt_x_x_v_i64m1_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m( @@ -255,7 +255,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return vwcvt_x_x_v_i64m2_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m( @@ -264,7 +264,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4_m(vbool16_t mask, 
vint32m2_t src, size_t vl) { - return vwcvt_x_x_v_i64m4_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m( @@ -273,6 +273,6 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return vwcvt_x_x_v_i64m8_m(mask, src, vl); + return __riscv_vwcvt_x_x_v_i64m8_m(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvtu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvtu.c index a076f6aeeb93..68603b8bed83 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvtu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwcvtu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4(vuint8mf8_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf4(src, vl); + return __riscv_vwcvtu_x_x_v_u16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2( @@ -21,7 +21,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4(vuint8mf8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2(vuint8mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf2(src, vl); + return __riscv_vwcvtu_x_x_v_u16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1( @@ -30,7 +30,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2(vuint8mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1(vuint8mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m1(src, vl); + return __riscv_vwcvtu_x_x_v_u16m1(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2( @@ -39,7 +39,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1(vuint8mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2(vuint8m1_t src, size_t 
vl) { - return vwcvtu_x_x_v_u16m2(src, vl); + return __riscv_vwcvtu_x_x_v_u16m2(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4( @@ -48,7 +48,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2(vuint8m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4(vuint8m2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m4(src, vl); + return __riscv_vwcvtu_x_x_v_u16m4(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8( @@ -57,7 +57,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4(vuint8m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8(vuint8m4_t src, size_t vl) { - return vwcvtu_x_x_v_u16m8(src, vl); + return __riscv_vwcvtu_x_x_v_u16m8(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2( @@ -66,7 +66,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8(vuint8m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2(vuint16mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u32mf2(src, vl); + return __riscv_vwcvtu_x_x_v_u32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1( @@ -75,7 +75,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2(vuint16mf4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1(vuint16mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m1(src, vl); + return __riscv_vwcvtu_x_x_v_u32m1(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2( @@ -84,7 +84,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1(vuint16mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2(vuint16m1_t src, size_t vl) { - return vwcvtu_x_x_v_u32m2(src, vl); + return __riscv_vwcvtu_x_x_v_u32m2(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4( @@ -93,7 +93,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2(vuint16m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4(vuint16m2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m4(src, vl); + return 
__riscv_vwcvtu_x_x_v_u32m4(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8( @@ -102,7 +102,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4(vuint16m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8(vuint16m4_t src, size_t vl) { - return vwcvtu_x_x_v_u32m8(src, vl); + return __riscv_vwcvtu_x_x_v_u32m8(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1( @@ -111,7 +111,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8(vuint16m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1(vuint32mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u64m1(src, vl); + return __riscv_vwcvtu_x_x_v_u64m1(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2( @@ -120,7 +120,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1(vuint32mf2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2(vuint32m1_t src, size_t vl) { - return vwcvtu_x_x_v_u64m2(src, vl); + return __riscv_vwcvtu_x_x_v_u64m2(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4( @@ -129,7 +129,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2(vuint32m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4(vuint32m2_t src, size_t vl) { - return vwcvtu_x_x_v_u64m4(src, vl); + return __riscv_vwcvtu_x_x_v_u64m4(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8( @@ -138,7 +138,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4(vuint32m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8(vuint32m4_t src, size_t vl) { - return vwcvtu_x_x_v_u64m8(src, vl); + return __riscv_vwcvtu_x_x_v_u64m8(src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m( @@ -147,7 +147,7 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8(vuint32m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf4_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u16mf4_m(mask, 
src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m( @@ -156,7 +156,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf2_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u16mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m( @@ -165,7 +165,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1_m(vbool16_t mask, vuint8mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m1_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u16m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m( @@ -174,7 +174,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_m(vbool16_t mask, vuint8mf2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) { - return vwcvtu_x_x_v_u16m2_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u16m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m( @@ -183,7 +183,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m4_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u16m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m( @@ -192,7 +192,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl) { - return vwcvtu_x_x_v_u16m8_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u16m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_m( @@ -201,7 +201,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_m(vbool2_t mask, 
vuint8m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u32mf2_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u32mf2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m( @@ -210,7 +210,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m1_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u32m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m( @@ -219,7 +219,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_m(vbool32_t mask, vuint16mf2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { - return vwcvtu_x_x_v_u32m2_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u32m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m( @@ -228,7 +228,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m4_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u32m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m( @@ -237,7 +237,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl) { - return vwcvtu_x_x_v_u32m8_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u32m8_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m( @@ -246,7 +246,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1_m(vbool64_t mask, vuint32mf2_t src, 
size_t vl) { - return vwcvtu_x_x_v_u64m1_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u64m1_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m( @@ -255,7 +255,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_m(vbool64_t mask, vuint32mf2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return vwcvtu_x_x_v_u64m2_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u64m2_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m( @@ -264,7 +264,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return vwcvtu_x_x_v_u64m4_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u64m4_m(mask, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m( @@ -273,6 +273,6 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return vwcvtu_x_x_v_u64m8_m(mask, src, vl); + return __riscv_vwcvtu_x_x_v_u64m8_m(mask, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmacc.c index 4c6a796a6e5a..d56952b03854 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmacc.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vwmacc_vv_i16mf4(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4( @@ -22,7 +22,7 @@ vint16mf4_t 
test_vwmacc_vv_i16mf4(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vwmacc_vx_i16mf4(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2( @@ -31,7 +31,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vwmacc_vv_i16mf2(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2( @@ -40,7 +40,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vwmacc_vx_i16mf2(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1( @@ -49,7 +49,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vwmacc_vv_i16m1(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1( @@ -58,7 +58,7 @@ vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vwmacc_vx_i16m1(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2( @@ -67,7 +67,7 @@ vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t 
vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vwmacc_vv_i16m2(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2( @@ -76,7 +76,7 @@ vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vwmacc_vx_i16m2(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4( @@ -85,7 +85,7 @@ vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vwmacc_vv_i16m4(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4( @@ -94,7 +94,7 @@ vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vwmacc_vx_i16m4(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8( @@ -103,7 +103,7 @@ vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vwmacc_vv_i16m8(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8( @@ -112,7 +112,7 @@ vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vwmacc_vx_i16m8(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vwmacc_vv_i32mf2( @@ -121,7 +121,7 @@ vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vwmacc_vv_i32mf2(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2( @@ -130,7 +130,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vwmacc_vx_i32mf2(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1( @@ -139,7 +139,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vwmacc_vv_i32m1(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1( @@ -148,7 +148,7 @@ vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vwmacc_vx_i32m1(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2( @@ -157,7 +157,7 @@ vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vwmacc_vv_i32m2(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2( @@ -166,7 +166,7 @@ vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, s // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vwmacc_vx_i32m2(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4( @@ -175,7 +175,7 @@ vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vwmacc_vv_i32m4(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4( @@ -184,7 +184,7 @@ vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vwmacc_vx_i32m4(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8( @@ -193,7 +193,7 @@ vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vwmacc_vv_i32m8(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8( @@ -202,7 +202,7 @@ vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vwmacc_vx_i32m8(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1( @@ -211,7 +211,7 @@ vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return 
vwmacc_vv_i64m1(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1( @@ -220,7 +220,7 @@ vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vwmacc_vx_i64m1(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2( @@ -229,7 +229,7 @@ vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vwmacc_vv_i64m2(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2( @@ -238,7 +238,7 @@ vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vwmacc_vx_i64m2(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4( @@ -247,7 +247,7 @@ vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vwmacc_vv_i64m4(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4( @@ -256,7 +256,7 @@ vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vwmacc_vx_i64m4(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8( @@ -265,7 +265,7 @@ 
vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vwmacc_vv_i64m8(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8( @@ -274,7 +274,7 @@ vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vwmacc_vx_i64m8(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m( @@ -283,7 +283,7 @@ vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vwmacc_vv_i16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m( @@ -292,7 +292,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vwmacc_vx_i16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m( @@ -301,7 +301,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vwmacc_vv_i16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m( @@ -310,7 +310,7 @@ vint16mf2_t 
test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vwmacc_vx_i16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m( @@ -319,7 +319,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vwmacc_vv_i16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m( @@ -328,7 +328,7 @@ vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vwmacc_vx_i16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m( @@ -337,7 +337,7 @@ vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vwmacc_vv_i16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m( @@ -346,7 +346,7 @@ vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vwmacc_vx_i16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m( @@ -355,7 
+355,7 @@ vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vwmacc_vv_i16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m( @@ -364,7 +364,7 @@ vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vwmacc_vx_i16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m( @@ -373,7 +373,7 @@ vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vwmacc_vv_i16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m( @@ -382,7 +382,7 @@ vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vwmacc_vx_i16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m( @@ -391,7 +391,7 @@ vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vwmacc_vv_i32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vwmacc_vx_i32mf2_m( @@ -400,7 +400,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vwmacc_vx_i32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m( @@ -409,7 +409,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vwmacc_vv_i32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m( @@ -418,7 +418,7 @@ vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vwmacc_vx_i32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m( @@ -427,7 +427,7 @@ vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vwmacc_vv_i32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m( @@ -436,7 +436,7 @@ vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vwmacc_vx_i32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m2_m(mask, vd, 
rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m( @@ -445,7 +445,7 @@ vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vwmacc_vv_i32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m( @@ -454,7 +454,7 @@ vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vwmacc_vx_i32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m( @@ -463,7 +463,7 @@ vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vwmacc_vv_i32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m( @@ -472,7 +472,7 @@ vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vwmacc_vx_i32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m( @@ -481,7 +481,7 @@ vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vwmacc_vv_i64m1_m(mask, vd, vs1, vs2, vl); + return 
__riscv_vwmacc_vv_i64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m( @@ -490,7 +490,7 @@ vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vwmacc_vx_i64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m( @@ -499,7 +499,7 @@ vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vwmacc_vv_i64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m( @@ -508,7 +508,7 @@ vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vwmacc_vx_i64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m( @@ -517,7 +517,7 @@ vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vwmacc_vv_i64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m( @@ -526,7 +526,7 @@ vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vwmacc_vx_i64m4_m(mask, 
vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m( @@ -535,7 +535,7 @@ vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vwmacc_vv_i64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m( @@ -544,6 +544,6 @@ vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vwmacc_vx_i64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccsu.c index 4fe18b19aa1c..4a21eeeea274 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccsu.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccsu_vv_i16mf4(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4( @@ -22,7 +22,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccsu_vx_i16mf4(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vwmaccsu_vv_i16mf2( @@ -31,7 +31,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccsu_vv_i16mf2(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2( @@ -40,7 +40,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccsu_vx_i16mf2(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1( @@ -49,7 +49,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i16m1(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1( @@ -58,7 +58,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i16m1(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2( @@ -67,7 +67,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vwmaccsu_vv_i16m2(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2( @@ -76,7 +76,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t vd, vint8m1_t vs1, 
vuint8m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return vwmaccsu_vx_i16m2(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4( @@ -85,7 +85,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vwmaccsu_vv_i16m4(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4( @@ -94,7 +94,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return vwmaccsu_vx_i16m4(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8( @@ -103,7 +103,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vwmaccsu_vv_i16m8(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8( @@ -112,7 +112,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return vwmaccsu_vx_i16m8(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2( @@ -121,7 +121,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t vd, vint16mf4_t vs1, 
vuint16mf4_t vs2, size_t vl) { - return vwmaccsu_vv_i32mf2(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2( @@ -130,7 +130,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccsu_vx_i32mf2(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1( @@ -139,7 +139,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i32m1(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1( @@ -148,7 +148,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i32m1(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2( @@ -157,7 +157,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vwmaccsu_vv_i32m2(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2( @@ -166,7 +166,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return vwmaccsu_vx_i32m2(vd, rs1, vs2, vl); + return 
__riscv_vwmaccsu_vx_i32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4( @@ -175,7 +175,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vwmaccsu_vv_i32m4(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4( @@ -184,7 +184,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return vwmaccsu_vx_i32m4(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8( @@ -193,7 +193,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vwmaccsu_vv_i32m8(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8( @@ -202,7 +202,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return vwmaccsu_vx_i32m8(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1( @@ -211,7 +211,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i64m1(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1( @@ -220,7 
+220,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i64m1(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2( @@ -229,7 +229,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vwmaccsu_vv_i64m2(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2( @@ -238,7 +238,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { - return vwmaccsu_vx_i64m2(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4( @@ -247,7 +247,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vwmaccsu_vv_i64m4(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4( @@ -256,7 +256,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return vwmaccsu_vx_i64m4(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8( @@ -265,7 +265,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, s // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vwmaccsu_vv_i64m8(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8( @@ -274,7 +274,7 @@ vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return vwmaccsu_vx_i64m8(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m( @@ -283,7 +283,7 @@ vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccsu_vv_i16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m( @@ -292,7 +292,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccsu_vx_i16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m( @@ -301,7 +301,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccsu_vv_i16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m( @@ -310,7 +310,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, 
vint16mf2_t vd, vint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccsu_vx_i16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m( @@ -319,7 +319,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m( @@ -328,7 +328,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m( @@ -337,7 +337,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vwmaccsu_vv_i16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m( @@ -346,7 +346,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return vwmaccsu_vx_i16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m( @@ 
-355,7 +355,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vwmaccsu_vv_i16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m( @@ -364,7 +364,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return vwmaccsu_vx_i16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m( @@ -373,7 +373,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vwmaccsu_vv_i16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m( @@ -382,7 +382,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return vwmaccsu_vx_i16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m( @@ -391,7 +391,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccsu_vv_i32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32mf2_m(mask, 
vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m( @@ -400,7 +400,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccsu_vx_i32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m( @@ -409,7 +409,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m( @@ -418,7 +418,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m( @@ -427,7 +427,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vwmaccsu_vv_i32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m( @@ -436,7 +436,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return 
vwmaccsu_vx_i32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m( @@ -445,7 +445,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vwmaccsu_vv_i32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m( @@ -454,7 +454,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return vwmaccsu_vx_i32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m( @@ -463,7 +463,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vwmaccsu_vv_i32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m( @@ -472,7 +472,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return vwmaccsu_vx_i32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m( @@ -481,7 +481,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, 
vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m( @@ -490,7 +490,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m( @@ -499,7 +499,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vwmaccsu_vv_i64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m( @@ -508,7 +508,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { - return vwmaccsu_vx_i64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m( @@ -517,7 +517,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vwmaccsu_vv_i64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m( @@ -526,7 +526,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return vwmaccsu_vx_i64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m( @@ -535,7 +535,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vwmaccsu_vv_i64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m( @@ -544,6 +544,6 @@ vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return vwmaccsu_vx_i64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccu.c index 460be5c3dee8..6896c7f6cbe8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccu.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccu_vv_u16mf4(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16mf4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4( @@ -22,7 +22,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t 
vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccu_vx_u16mf4(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2( @@ -31,7 +31,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccu_vv_u16mf2(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2( @@ -40,7 +40,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccu_vx_u16mf2(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1( @@ -49,7 +49,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccu_vv_u16m1(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1( @@ -58,7 +58,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccu_vx_u16m1(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2( @@ -67,7 +67,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vwmaccu_vv_u16m2(vd, vs1, vs2, vl); + return 
__riscv_vwmaccu_vv_u16m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2( @@ -76,7 +76,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vwmaccu_vx_u16m2(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4( @@ -85,7 +85,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vwmaccu_vv_u16m4(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4( @@ -94,7 +94,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vwmaccu_vx_u16m4(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8( @@ -103,7 +103,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vwmaccu_vv_u16m8(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8( @@ -112,7 +112,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vwmaccu_vx_u16m8(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2( @@ -121,7 +121,7 @@ vuint16m8_t 
test_vwmaccu_vx_u16m8(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccu_vv_u32mf2(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32mf2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2( @@ -130,7 +130,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccu_vx_u32mf2(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1( @@ -139,7 +139,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccu_vv_u32m1(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1( @@ -148,7 +148,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccu_vx_u32m1(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2( @@ -157,7 +157,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vwmaccu_vv_u32m2(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2( @@ -166,7 +166,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t v // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vwmaccu_vx_u32m2(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4( @@ -175,7 +175,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vwmaccu_vv_u32m4(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4( @@ -184,7 +184,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vwmaccu_vx_u32m4(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8( @@ -193,7 +193,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vwmaccu_vv_u32m8(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8( @@ -202,7 +202,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vwmaccu_vx_u32m8(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1( @@ -211,7 +211,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - 
return vwmaccu_vv_u64m1(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m1(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1( @@ -220,7 +220,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccu_vx_u64m1(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2( @@ -229,7 +229,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vwmaccu_vv_u64m2(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m2(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2( @@ -238,7 +238,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vwmaccu_vx_u64m2(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4( @@ -247,7 +247,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vwmaccu_vv_u64m4(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m4(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4( @@ -256,7 +256,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vwmaccu_vx_u64m4(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m4(vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8( @@ -265,7 +265,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vwmaccu_vv_u64m8(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m8(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8( @@ -274,7 +274,7 @@ vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vwmaccu_vx_u64m8(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m( @@ -283,7 +283,7 @@ vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccu_vv_u16mf4_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16mf4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m( @@ -292,7 +292,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccu_vx_u16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m( @@ -301,7 +301,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint8_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccu_vv_u16mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16mf2_m(mask, vd, vs1, 
vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m( @@ -310,7 +310,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccu_vx_u16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m( @@ -319,7 +319,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint8_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccu_vv_u16m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m( @@ -328,7 +328,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccu_vx_u16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m( @@ -337,7 +337,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vwmaccu_vv_u16m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m( @@ -346,7 +346,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vwmaccu_vx_u16m2_m(mask, vd, 
rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m( @@ -355,7 +355,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vwmaccu_vv_u16m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m( @@ -364,7 +364,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vwmaccu_vx_u16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m( @@ -373,7 +373,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vwmaccu_vv_u16m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m( @@ -382,7 +382,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vwmaccu_vx_u16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m( @@ -391,7 +391,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, 
vuint16mf4_t vs2, size_t vl) { - return vwmaccu_vv_u32mf2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32mf2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m( @@ -400,7 +400,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccu_vx_u32mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m( @@ -409,7 +409,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccu_vv_u32m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m( @@ -418,7 +418,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccu_vx_u32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m( @@ -427,7 +427,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vwmaccu_vv_u32m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m( @@ -436,7 +436,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vwmaccu_vx_u32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m( @@ -445,7 +445,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vwmaccu_vv_u32m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m( @@ -454,7 +454,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vwmaccu_vx_u32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m( @@ -463,7 +463,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vwmaccu_vv_u32m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m( @@ -472,7 +472,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vwmaccu_vx_u32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m( @@ -481,7 +481,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, 
vuint32m8_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccu_vv_u64m1_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m1_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m( @@ -490,7 +490,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccu_vx_u64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m( @@ -499,7 +499,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vwmaccu_vv_u64m2_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m2_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m( @@ -508,7 +508,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vwmaccu_vx_u64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m( @@ -517,7 +517,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vwmaccu_vv_u64m4_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m4_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m( 
@@ -526,7 +526,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vwmaccu_vx_u64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m( @@ -535,7 +535,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vwmaccu_vv_u64m8_m(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m8_m(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m( @@ -544,6 +544,6 @@ vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vwmaccu_vx_u64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccus.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccus.c index 128a56eb61b4..0b2c1130d8bc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccus.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmaccus.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { - return vwmaccus_vx_i16mf4(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16mf4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2( @@ -22,7 +22,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t vd, uint8_t 
rs1, vint8mf8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { - return vwmaccus_vx_i16mf2(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1( @@ -31,7 +31,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { - return vwmaccus_vx_i16m1(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2( @@ -40,7 +40,7 @@ vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { - return vwmaccus_vx_i16m2(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4( @@ -49,7 +49,7 @@ vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { - return vwmaccus_vx_i16m4(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8( @@ -58,7 +58,7 @@ vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { - return vwmaccus_vx_i16m8(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2( @@ -67,7 +67,7 @@ vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t vd, uint16_t rs1, 
vint16mf4_t vs2, size_t vl) { - return vwmaccus_vx_i32mf2(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32mf2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1( @@ -76,7 +76,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { - return vwmaccus_vx_i32m1(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m1(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2( @@ -85,7 +85,7 @@ vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { - return vwmaccus_vx_i32m2(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4( @@ -94,7 +94,7 @@ vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { - return vwmaccus_vx_i32m4(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8( @@ -103,7 +103,7 @@ vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { - return vwmaccus_vx_i32m8(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1( @@ -112,7 +112,7 @@ vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { - return vwmaccus_vx_i64m1(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m1(vd, 
rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2( @@ -121,7 +121,7 @@ vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { - return vwmaccus_vx_i64m2(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m2(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4( @@ -130,7 +130,7 @@ vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { - return vwmaccus_vx_i64m4(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m4(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8( @@ -139,7 +139,7 @@ vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { - return vwmaccus_vx_i64m8(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m8(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m( @@ -148,7 +148,7 @@ vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { - return vwmaccus_vx_i16mf4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16mf4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m( @@ -157,7 +157,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, uint8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { - return vwmaccus_vx_i16mf2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16mf2_m(mask, vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m( @@ -166,7 +166,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, uint8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { - return vwmaccus_vx_i16m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m( @@ -175,7 +175,7 @@ vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { - return vwmaccus_vx_i16m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m( @@ -184,7 +184,7 @@ vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { - return vwmaccus_vx_i16m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m( @@ -193,7 +193,7 @@ vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { - return vwmaccus_vx_i16m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m( @@ -202,7 +202,7 @@ vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { - return vwmaccus_vx_i32mf2_m(mask, vd, rs1, vs2, vl); + 
return __riscv_vwmaccus_vx_i32mf2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m( @@ -211,7 +211,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { - return vwmaccus_vx_i32m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m( @@ -220,7 +220,7 @@ vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { - return vwmaccus_vx_i32m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m( @@ -229,7 +229,7 @@ vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { - return vwmaccus_vx_i32m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m( @@ -238,7 +238,7 @@ vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { - return vwmaccus_vx_i32m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m8_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m( @@ -247,7 +247,7 @@ vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, 
size_t vl) { - return vwmaccus_vx_i64m1_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m1_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m( @@ -256,7 +256,7 @@ vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { - return vwmaccus_vx_i64m2_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m2_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m( @@ -265,7 +265,7 @@ vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { - return vwmaccus_vx_i64m4_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m4_m(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m( @@ -274,6 +274,6 @@ vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { - return vwmaccus_vx_i64m8_m(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m8_m(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmul.c index 6aa8c765d307..46272f4e21ca 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmul.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwmul_vv_i16mf4(op1, op2, vl); + return __riscv_vwmul_vv_i16mf4(op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4( @@ -21,7 +21,7 @@ vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16mf4(op1, op2, vl); + return __riscv_vwmul_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2( @@ -30,7 +30,7 @@ vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwmul_vv_i16mf2(op1, op2, vl); + return __riscv_vwmul_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2( @@ -39,7 +39,7 @@ vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16mf2(op1, op2, vl); + return __riscv_vwmul_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1( @@ -48,7 +48,7 @@ vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwmul_vv_i16m1(op1, op2, vl); + return __riscv_vwmul_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1( @@ -57,7 +57,7 @@ vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m1(op1, op2, vl); + return __riscv_vwmul_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2( @@ -66,7 +66,7 @@ vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwmul_vv_i16m2(op1, op2, vl); + return 
__riscv_vwmul_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2( @@ -75,7 +75,7 @@ vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m2(op1, op2, vl); + return __riscv_vwmul_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4( @@ -84,7 +84,7 @@ vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwmul_vv_i16m4(op1, op2, vl); + return __riscv_vwmul_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4( @@ -93,7 +93,7 @@ vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m4(op1, op2, vl); + return __riscv_vwmul_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8( @@ -102,7 +102,7 @@ vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwmul_vv_i16m8(op1, op2, vl); + return __riscv_vwmul_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8( @@ -111,7 +111,7 @@ vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m8(op1, op2, vl); + return __riscv_vwmul_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2( @@ -120,7 +120,7 @@ vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwmul_vv_i32mf2(op1, op2, vl); + return 
__riscv_vwmul_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2( @@ -129,7 +129,7 @@ vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32mf2(op1, op2, vl); + return __riscv_vwmul_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1( @@ -138,7 +138,7 @@ vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwmul_vv_i32m1(op1, op2, vl); + return __riscv_vwmul_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1( @@ -147,7 +147,7 @@ vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m1(op1, op2, vl); + return __riscv_vwmul_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2( @@ -156,7 +156,7 @@ vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwmul_vv_i32m2(op1, op2, vl); + return __riscv_vwmul_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2( @@ -165,7 +165,7 @@ vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m2(op1, op2, vl); + return __riscv_vwmul_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4( @@ -174,7 +174,7 @@ vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return 
vwmul_vv_i32m4(op1, op2, vl); + return __riscv_vwmul_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4( @@ -183,7 +183,7 @@ vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m4(op1, op2, vl); + return __riscv_vwmul_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8( @@ -192,7 +192,7 @@ vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwmul_vv_i32m8(op1, op2, vl); + return __riscv_vwmul_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8( @@ -201,7 +201,7 @@ vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m8(op1, op2, vl); + return __riscv_vwmul_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1( @@ -210,7 +210,7 @@ vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwmul_vv_i64m1(op1, op2, vl); + return __riscv_vwmul_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1( @@ -219,7 +219,7 @@ vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m1(op1, op2, vl); + return __riscv_vwmul_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2( @@ -228,7 +228,7 @@ vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, 
size_t vl) { - return vwmul_vv_i64m2(op1, op2, vl); + return __riscv_vwmul_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2( @@ -237,7 +237,7 @@ vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m2(op1, op2, vl); + return __riscv_vwmul_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4( @@ -246,7 +246,7 @@ vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwmul_vv_i64m4(op1, op2, vl); + return __riscv_vwmul_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4( @@ -255,7 +255,7 @@ vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m4(op1, op2, vl); + return __riscv_vwmul_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8( @@ -264,7 +264,7 @@ vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwmul_vv_i64m8(op1, op2, vl); + return __riscv_vwmul_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8( @@ -273,7 +273,7 @@ vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m8(op1, op2, vl); + return __riscv_vwmul_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_m( @@ -282,7 +282,7 @@ vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vwmul_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwmul_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_m( @@ -291,7 +291,7 @@ vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_m( @@ -300,7 +300,7 @@ vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwmul_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_m( @@ -309,7 +309,7 @@ vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_m( @@ -318,7 +318,7 @@ vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwmul_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_m( @@ -327,7 +327,7 @@ vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return 
vwmul_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_m( @@ -336,7 +336,7 @@ vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwmul_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_m( @@ -345,7 +345,7 @@ vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_m( @@ -354,7 +354,7 @@ vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwmul_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_m( @@ -363,7 +363,7 @@ vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_m( @@ -372,7 +372,7 @@ vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwmul_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwmul_vx_i16m8_m( @@ -381,7 +381,7 @@ vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_m( @@ -390,7 +390,7 @@ vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwmul_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_m( @@ -399,7 +399,7 @@ vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_m( @@ -408,7 +408,7 @@ vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwmul_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_m( @@ -417,7 +417,7 @@ vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_m( @@ -426,7 +426,7 @@ vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, 
vint16mf2_t op1, int16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwmul_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_m( @@ -435,7 +435,7 @@ vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_m( @@ -444,7 +444,7 @@ vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwmul_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_m( @@ -453,7 +453,7 @@ vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_m( @@ -462,7 +462,7 @@ vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwmul_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_m( @@ -471,7 +471,7 @@ vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t 
mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_m( @@ -480,7 +480,7 @@ vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwmul_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_m( @@ -489,7 +489,7 @@ vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_m( @@ -498,7 +498,7 @@ vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwmul_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_m( @@ -507,7 +507,7 @@ vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_m( @@ -516,7 +516,7 @@ vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwmul_vv_i64m4_m(mask, op1, op2, vl); + 
return __riscv_vwmul_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_m( @@ -525,7 +525,7 @@ vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_m( @@ -534,7 +534,7 @@ vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwmul_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vwmul_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_m( @@ -543,6 +543,6 @@ vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vwmul_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulsu.c index 5d57827a1674..4b8960d86a22 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulsu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulsu_vv_i16mf4(op1, op2, vl); + return __riscv_vwmulsu_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4( @@ -21,7 +21,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf4(op1, op2, vl); + return __riscv_vwmulsu_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2( @@ -30,7 +30,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulsu_vv_i16mf2(op1, op2, vl); + return __riscv_vwmulsu_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2( @@ -39,7 +39,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf2(op1, op2, vl); + return __riscv_vwmulsu_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1( @@ -48,7 +48,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulsu_vv_i16m1(op1, op2, vl); + return __riscv_vwmulsu_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1( @@ -57,7 +57,7 @@ vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m1(op1, op2, vl); + return __riscv_vwmulsu_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2( @@ -66,7 +66,7 @@ vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulsu_vv_i16m2(op1, op2, vl); + return __riscv_vwmulsu_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2( @@ -75,7 
+75,7 @@ vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m2(op1, op2, vl); + return __riscv_vwmulsu_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4( @@ -84,7 +84,7 @@ vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulsu_vv_i16m4(op1, op2, vl); + return __riscv_vwmulsu_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4( @@ -93,7 +93,7 @@ vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m4(op1, op2, vl); + return __riscv_vwmulsu_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8( @@ -102,7 +102,7 @@ vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulsu_vv_i16m8(op1, op2, vl); + return __riscv_vwmulsu_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8( @@ -111,7 +111,7 @@ vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m8(op1, op2, vl); + return __riscv_vwmulsu_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2( @@ -120,7 +120,7 @@ vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulsu_vv_i32mf2(op1, op2, vl); + return 
__riscv_vwmulsu_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2( @@ -129,7 +129,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32mf2(op1, op2, vl); + return __riscv_vwmulsu_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1( @@ -138,7 +138,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulsu_vv_i32m1(op1, op2, vl); + return __riscv_vwmulsu_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1( @@ -147,7 +147,7 @@ vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m1(op1, op2, vl); + return __riscv_vwmulsu_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2( @@ -156,7 +156,7 @@ vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulsu_vv_i32m2(op1, op2, vl); + return __riscv_vwmulsu_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2( @@ -165,7 +165,7 @@ vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m2(op1, op2, vl); + return __riscv_vwmulsu_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4( @@ -174,7 +174,7 @@ vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulsu_vv_i32m4(op1, op2, vl); + return __riscv_vwmulsu_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4( @@ -183,7 +183,7 @@ vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m4(op1, op2, vl); + return __riscv_vwmulsu_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8( @@ -192,7 +192,7 @@ vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulsu_vv_i32m8(op1, op2, vl); + return __riscv_vwmulsu_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8( @@ -201,7 +201,7 @@ vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m8(op1, op2, vl); + return __riscv_vwmulsu_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1( @@ -210,7 +210,7 @@ vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulsu_vv_i64m1(op1, op2, vl); + return __riscv_vwmulsu_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1( @@ -219,7 +219,7 @@ vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m1(op1, op2, vl); + return __riscv_vwmulsu_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2( @@ -228,7 +228,7 @@ vint64m1_t 
test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulsu_vv_i64m2(op1, op2, vl); + return __riscv_vwmulsu_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2( @@ -237,7 +237,7 @@ vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m2(op1, op2, vl); + return __riscv_vwmulsu_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4( @@ -246,7 +246,7 @@ vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulsu_vv_i64m4(op1, op2, vl); + return __riscv_vwmulsu_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4( @@ -255,7 +255,7 @@ vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m4(op1, op2, vl); + return __riscv_vwmulsu_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8( @@ -264,7 +264,7 @@ vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulsu_vv_i64m8(op1, op2, vl); + return __riscv_vwmulsu_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8( @@ -273,7 +273,7 @@ vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m8(op1, op2, vl); + return 
__riscv_vwmulsu_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_m( @@ -282,7 +282,7 @@ vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulsu_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_m( @@ -291,7 +291,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_m( @@ -300,7 +300,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulsu_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_m( @@ -309,7 +309,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_m( @@ -318,7 +318,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulsu_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m1_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_m( @@ -327,7 +327,7 @@ vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_m( @@ -336,7 +336,7 @@ vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulsu_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_m( @@ -345,7 +345,7 @@ vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_m( @@ -354,7 +354,7 @@ vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulsu_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_m( @@ -363,7 +363,7 @@ vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_m( @@ -372,7 +372,7 @@ vint16m4_t 
test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulsu_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_m( @@ -381,7 +381,7 @@ vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_m( @@ -390,7 +390,7 @@ vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulsu_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_m( @@ -399,7 +399,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_m( @@ -408,7 +408,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulsu_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_m( @@ -417,7 +417,7 @@ vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint16mf2_t 
op1, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_m( @@ -426,7 +426,7 @@ vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulsu_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_m( @@ -435,7 +435,7 @@ vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_m( @@ -444,7 +444,7 @@ vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulsu_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_m( @@ -453,7 +453,7 @@ vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_m( @@ -462,7 +462,7 @@ vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulsu_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_m( @@ -471,7 +471,7 @@ vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_m( @@ -480,7 +480,7 @@ vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulsu_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_m( @@ -489,7 +489,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_m( @@ -498,7 +498,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulsu_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_m( @@ -507,7 +507,7 @@ vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, 
uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_m( @@ -516,7 +516,7 @@ vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulsu_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_m( @@ -525,7 +525,7 @@ vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_m( @@ -534,7 +534,7 @@ vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulsu_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_m( @@ -543,6 +543,6 @@ vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulu.c index 087a6819dd98..20a041e0605e 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwmulu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulu_vv_u16mf4(op1, op2, vl); + return __riscv_vwmulu_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4( @@ -21,7 +21,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16mf4(op1, op2, vl); + return __riscv_vwmulu_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2( @@ -30,7 +30,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulu_vv_u16mf2(op1, op2, vl); + return __riscv_vwmulu_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2( @@ -39,7 +39,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16mf2(op1, op2, vl); + return __riscv_vwmulu_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1( @@ -48,7 +48,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulu_vv_u16m1(op1, op2, vl); + return __riscv_vwmulu_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1( @@ -57,7 +57,7 @@ vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m1(op1, op2, vl); + return __riscv_vwmulu_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2( @@ -66,7 +66,7 @@ vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulu_vv_u16m2(op1, op2, vl); + return __riscv_vwmulu_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2( @@ -75,7 +75,7 @@ vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m2(op1, op2, vl); + return __riscv_vwmulu_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4( @@ -84,7 +84,7 @@ vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulu_vv_u16m4(op1, op2, vl); + return __riscv_vwmulu_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4( @@ -93,7 +93,7 @@ vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m4(op1, op2, vl); + return __riscv_vwmulu_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8( @@ -102,7 +102,7 @@ vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulu_vv_u16m8(op1, op2, vl); + return __riscv_vwmulu_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8( @@ -111,7 +111,7 @@ vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t 
vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m8(op1, op2, vl); + return __riscv_vwmulu_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2( @@ -120,7 +120,7 @@ vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulu_vv_u32mf2(op1, op2, vl); + return __riscv_vwmulu_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2( @@ -129,7 +129,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32mf2(op1, op2, vl); + return __riscv_vwmulu_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1( @@ -138,7 +138,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulu_vv_u32m1(op1, op2, vl); + return __riscv_vwmulu_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1( @@ -147,7 +147,7 @@ vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m1(op1, op2, vl); + return __riscv_vwmulu_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2( @@ -156,7 +156,7 @@ vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulu_vv_u32m2(op1, op2, vl); + return __riscv_vwmulu_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwmulu_vx_u32m2( @@ -165,7 +165,7 @@ vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m2(op1, op2, vl); + return __riscv_vwmulu_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4( @@ -174,7 +174,7 @@ vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulu_vv_u32m4(op1, op2, vl); + return __riscv_vwmulu_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4( @@ -183,7 +183,7 @@ vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m4(op1, op2, vl); + return __riscv_vwmulu_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8( @@ -192,7 +192,7 @@ vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulu_vv_u32m8(op1, op2, vl); + return __riscv_vwmulu_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8( @@ -201,7 +201,7 @@ vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m8(op1, op2, vl); + return __riscv_vwmulu_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1( @@ -210,7 +210,7 @@ vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return 
vwmulu_vv_u64m1(op1, op2, vl); + return __riscv_vwmulu_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1( @@ -219,7 +219,7 @@ vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m1(op1, op2, vl); + return __riscv_vwmulu_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2( @@ -228,7 +228,7 @@ vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulu_vv_u64m2(op1, op2, vl); + return __riscv_vwmulu_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2( @@ -237,7 +237,7 @@ vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m2(op1, op2, vl); + return __riscv_vwmulu_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4( @@ -246,7 +246,7 @@ vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulu_vv_u64m4(op1, op2, vl); + return __riscv_vwmulu_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4( @@ -255,7 +255,7 @@ vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m4(op1, op2, vl); + return __riscv_vwmulu_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8( @@ -264,7 +264,7 @@ vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulu_vv_u64m8(op1, op2, vl); + return __riscv_vwmulu_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8( @@ -273,7 +273,7 @@ vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m8(op1, op2, vl); + return __riscv_vwmulu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_m( @@ -282,7 +282,7 @@ vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_m( @@ -291,7 +291,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_m( @@ -300,7 +300,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_m( @@ -309,7 +309,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16mf2_m(mask, 
op1, op2, vl); + return __riscv_vwmulu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_m( @@ -318,7 +318,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_m( @@ -327,7 +327,7 @@ vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_m( @@ -336,7 +336,7 @@ vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_m( @@ -345,7 +345,7 @@ vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_m( @@ -354,7 +354,7 @@ vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u16m4_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_m( @@ -363,7 +363,7 @@ vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_m( @@ -372,7 +372,7 @@ vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_m( @@ -381,7 +381,7 @@ vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_m( @@ -390,7 +390,7 @@ vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulu_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_m( @@ -399,7 +399,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_m( @@ -408,7 +408,7 @@ 
vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_m( @@ -417,7 +417,7 @@ vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_m( @@ -426,7 +426,7 @@ vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_m( @@ -435,7 +435,7 @@ vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_m( @@ -444,7 +444,7 @@ vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_m( @@ -453,7 +453,7 @@ vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, 
vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_m( @@ -462,7 +462,7 @@ vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_m( @@ -471,7 +471,7 @@ vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_m( @@ -480,7 +480,7 @@ vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_m( @@ -489,7 +489,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_m( @@ -498,7 +498,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_m( @@ -507,7 +507,7 @@ vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_m( @@ -516,7 +516,7 @@ vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_m( @@ -525,7 +525,7 @@ vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_m( @@ -534,7 +534,7 @@ vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulu_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vwmulu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_m( @@ -543,6 +543,6 @@ vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, 
size_t vl) { - return vwmulu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vwmulu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c index 360e1609ad31..d24e82b83910 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf8_i16m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i8mf8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1( @@ -21,7 +21,7 @@ vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint8mf8_t vector, vint16m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf4_i16m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i8mf4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1( @@ -30,7 +30,7 @@ vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint8mf4_t vector, vint16m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf2_i16m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i8mf2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1( @@ -39,7 +39,7 @@ vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint8mf2_t vector, vint16m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m1_i16m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i8m1_i16m1(vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1( @@ -48,7 +48,7 @@ vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vector, vint16m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m2_i16m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i8m2_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1( @@ -57,7 +57,7 @@ vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vector, vint16m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m4_i16m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i8m4_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1( @@ -66,7 +66,7 @@ vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vector, vint16m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m8_i16m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i8m8_i16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1( @@ -75,7 +75,7 @@ vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vector, vint16m1_t scalar, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf4_i32m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i16mf4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1( @@ -84,7 +84,7 @@ vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint16mf4_t vector, vint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf2_i32m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i16mf2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vwredsum_vs_i16m1_i32m1( @@ -93,7 +93,7 @@ vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint16mf2_t vector, vint32m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m1_i32m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i16m1_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1( @@ -102,7 +102,7 @@ vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vector, vint32m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m2_i32m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i16m2_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1( @@ -111,7 +111,7 @@ vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vector, vint32m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m4_i32m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i16m4_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1( @@ -120,7 +120,7 @@ vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vector, vint32m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m8_i32m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i16m8_i32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1( @@ -129,7 +129,7 @@ vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vector, vint32m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32mf2_i64m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i32mf2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vwredsum_vs_i32m1_i64m1( @@ -138,7 +138,7 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint32mf2_t vector, vint64m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m1_i64m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i32m1_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1( @@ -147,7 +147,7 @@ vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vector, vint64m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m2_i64m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i32m2_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1( @@ -156,7 +156,7 @@ vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vector, vint64m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m4_i64m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i32m4_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1( @@ -165,7 +165,7 @@ vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vector, vint64m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m8_i64m1(vector, scalar, vl); + return __riscv_vwredsum_vs_i32m8_i64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m( @@ -174,7 +174,7 @@ vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vector, vint64m1_t scalar, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf8_i16m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i8mf8_i16m1_m(mask, vector, scalar, 
vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m( @@ -183,7 +183,7 @@ vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint8mf8_t vector, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i8mf4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m( @@ -192,7 +192,7 @@ vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint8mf4_t vector, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf2_i16m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i8mf2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m( @@ -201,7 +201,7 @@ vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint8mf2_t vector, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m1_i16m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i8m1_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m( @@ -210,7 +210,7 @@ vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint8m1_t vector, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m2_i16m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i8m2_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m( @@ -219,7 +219,7 @@ vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint8m2_t vector, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint8m4_t vector, 
vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m4_i16m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i8m4_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m( @@ -228,7 +228,7 @@ vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint8m4_t vector, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m8_i16m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i8m8_i16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m( @@ -237,7 +237,7 @@ vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint8m8_t vector, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf4_i32m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i16mf4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m( @@ -246,7 +246,7 @@ vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint16mf4_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i16mf2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m( @@ -255,7 +255,7 @@ vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint16mf2_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m1_i32m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i16m1_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m( @@ -264,7 +264,7 @@ vint32m1_t 
test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint16m1_t vector, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m2_i32m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i16m2_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m( @@ -273,7 +273,7 @@ vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint16m2_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m4_i32m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i16m4_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m( @@ -282,7 +282,7 @@ vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint16m4_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m8_i32m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i16m8_i32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m( @@ -291,7 +291,7 @@ vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint16m8_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32mf2_i64m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i32mf2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m( @@ -300,7 +300,7 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint32mf2_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m1_i64m1_m(mask, 
vector, scalar, vl); + return __riscv_vwredsum_vs_i32m1_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m( @@ -309,7 +309,7 @@ vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint32m1_t vector, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m2_i64m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i32m2_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m( @@ -318,7 +318,7 @@ vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint32m2_t vector, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m4_i64m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i32m4_i64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m( @@ -327,6 +327,6 @@ vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint32m4_t vector, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m8_i64m1_m(mask, vector, scalar, vl); + return __riscv_vwredsum_vs_i32m8_i64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c index d0fde2644122..e310e46e59f8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return 
vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1( @@ -21,7 +21,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vector, vuint16m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1( @@ -30,7 +30,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vector, vuint16m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1( @@ -39,7 +39,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vector, vuint16m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m1_u16m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m1_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1( @@ -48,7 +48,7 @@ vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m2_u16m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m2_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1( @@ -57,7 +57,7 @@ vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return 
vwredsumu_vs_u8m4_u16m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m4_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1( @@ -66,7 +66,7 @@ vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m8_u16m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m8_u16m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1( @@ -75,7 +75,7 @@ vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vector, vuint16m1_t scalar, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf4_u32m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u16mf4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1( @@ -84,7 +84,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vector, vuint32m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf2_u32m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u16mf2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1( @@ -93,7 +93,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vector, vuint32m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m1_u32m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m1_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1( @@ -102,7 +102,7 @@ vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vector, vuint32m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vector, vuint32m1_t scalar, 
size_t vl) { - return vwredsumu_vs_u16m2_u32m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m2_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1( @@ -111,7 +111,7 @@ vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vector, vuint32m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m4_u32m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m4_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1( @@ -120,7 +120,7 @@ vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vector, vuint32m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m8_u32m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m8_u32m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1( @@ -129,7 +129,7 @@ vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vector, vuint32m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32mf2_u64m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u32mf2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1( @@ -138,7 +138,7 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vector, vuint64m1_t scal // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m1_u64m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m1_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1( @@ -147,7 +147,7 @@ vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vector, vuint64m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m2_u64m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m2_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1( @@ -156,7 +156,7 @@ vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vector, vuint64m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m4_u64m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m4_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1( @@ -165,7 +165,7 @@ vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vector, vuint64m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m8_u64m1(vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m8_u64m1(vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m( @@ -174,7 +174,7 @@ vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vector, vuint64m1_t scalar // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf8_u16m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8mf8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m( @@ -183,7 +183,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint8mf8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf4_u16m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8mf4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m( @@ -192,7 +192,7 @@ vuint16m1_t 
test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint8mf4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8mf2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m( @@ -201,7 +201,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint8mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m1_u16m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m1_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m( @@ -210,7 +210,7 @@ vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint8m1_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m2_u16m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m2_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m( @@ -219,7 +219,7 @@ vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint8m2_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m4_u16m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m4_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m( @@ -228,7 +228,7 @@ vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint8m4_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m8_u16m1_m(mask, 
vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m8_u16m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m( @@ -237,7 +237,7 @@ vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint8m8_t vector, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf4_u32m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16mf4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m( @@ -246,7 +246,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint16mf4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16mf2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m( @@ -255,7 +255,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint16mf2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m1_u32m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m1_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m( @@ -264,7 +264,7 @@ vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint16m1_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m2_u32m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m2_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m( @@ -273,7 +273,7 @@ vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, 
vuint16m2_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m4_u32m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m4_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m( @@ -282,7 +282,7 @@ vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint16m4_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m8_u32m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m8_u32m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m( @@ -291,7 +291,7 @@ vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint16m8_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32mf2_u64m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32mf2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m( @@ -300,7 +300,7 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint32mf2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m1_u64m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m1_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m( @@ -309,7 +309,7 @@ vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint32m1_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m2_u64m1_m(mask, vector, scalar, vl); 
+ return __riscv_vwredsumu_vs_u32m2_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m( @@ -318,7 +318,7 @@ vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint32m2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m4_u64m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m4_u64m1_m(mask, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m( @@ -327,6 +327,6 @@ vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint32m4_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m8_u64m1_m(mask, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m8_u64m1_m(mask, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsub.c index d11a8f88334b..feae78f6103c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsub.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_vv_i16mf4(op1, op2, vl); + return __riscv_vwsub_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4( @@ -21,7 +21,7 @@ vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16mf4(op1, op2, vl); + return __riscv_vwsub_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4( @@ -30,7 +30,7 @@ 
vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_wv_i16mf4(op1, op2, vl); + return __riscv_vwsub_wv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4( @@ -39,7 +39,7 @@ vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf4(op1, op2, vl); + return __riscv_vwsub_wx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2( @@ -48,7 +48,7 @@ vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_vv_i16mf2(op1, op2, vl); + return __riscv_vwsub_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2( @@ -57,7 +57,7 @@ vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16mf2(op1, op2, vl); + return __riscv_vwsub_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2( @@ -66,7 +66,7 @@ vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_wv_i16mf2(op1, op2, vl); + return __riscv_vwsub_wv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2( @@ -75,7 +75,7 @@ vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf2(op1, op2, vl); + return __riscv_vwsub_wx_i16mf2(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsub_vv_i16m1( @@ -84,7 +84,7 @@ vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_vv_i16m1(op1, op2, vl); + return __riscv_vwsub_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1( @@ -93,7 +93,7 @@ vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m1(op1, op2, vl); + return __riscv_vwsub_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1( @@ -102,7 +102,7 @@ vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_wv_i16m1(op1, op2, vl); + return __riscv_vwsub_wv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1( @@ -111,7 +111,7 @@ vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m1(op1, op2, vl); + return __riscv_vwsub_wx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2( @@ -120,7 +120,7 @@ vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwsub_vv_i16m2(op1, op2, vl); + return __riscv_vwsub_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2( @@ -129,7 +129,7 @@ vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m2(op1, op2, vl); + return __riscv_vwsub_vx_i16m2(op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2( @@ -138,7 +138,7 @@ vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwsub_wv_i16m2(op1, op2, vl); + return __riscv_vwsub_wv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2( @@ -147,7 +147,7 @@ vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m2(op1, op2, vl); + return __riscv_vwsub_wx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4( @@ -156,7 +156,7 @@ vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwsub_vv_i16m4(op1, op2, vl); + return __riscv_vwsub_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4( @@ -165,7 +165,7 @@ vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m4(op1, op2, vl); + return __riscv_vwsub_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4( @@ -174,7 +174,7 @@ vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwsub_wv_i16m4(op1, op2, vl); + return __riscv_vwsub_wv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4( @@ -183,7 +183,7 @@ vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m4(op1, op2, vl); + return __riscv_vwsub_wx_i16m4(op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8( @@ -192,7 +192,7 @@ vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwsub_vv_i16m8(op1, op2, vl); + return __riscv_vwsub_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8( @@ -201,7 +201,7 @@ vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m8(op1, op2, vl); + return __riscv_vwsub_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8( @@ -210,7 +210,7 @@ vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwsub_wv_i16m8(op1, op2, vl); + return __riscv_vwsub_wv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8( @@ -219,7 +219,7 @@ vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m8(op1, op2, vl); + return __riscv_vwsub_wx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2( @@ -228,7 +228,7 @@ vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_vv_i32mf2(op1, op2, vl); + return __riscv_vwsub_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2( @@ -237,7 +237,7 @@ vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32mf2(op1, op2, vl); + return 
__riscv_vwsub_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2( @@ -246,7 +246,7 @@ vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_wv_i32mf2(op1, op2, vl); + return __riscv_vwsub_wv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32mf2(op1, op2, vl); + return __riscv_vwsub_wx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_vv_i32m1(op1, op2, vl); + return __riscv_vwsub_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m1(op1, op2, vl); + return __riscv_vwsub_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1( @@ -282,7 +282,7 @@ vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_wv_i32m1(op1, op2, vl); + return __riscv_vwsub_wv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1( @@ -291,7 +291,7 @@ vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { 
- return vwsub_wx_i32m1(op1, op2, vl); + return __riscv_vwsub_wx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2( @@ -300,7 +300,7 @@ vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwsub_vv_i32m2(op1, op2, vl); + return __riscv_vwsub_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2( @@ -309,7 +309,7 @@ vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m2(op1, op2, vl); + return __riscv_vwsub_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2( @@ -318,7 +318,7 @@ vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwsub_wv_i32m2(op1, op2, vl); + return __riscv_vwsub_wv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2( @@ -327,7 +327,7 @@ vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m2(op1, op2, vl); + return __riscv_vwsub_wx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4( @@ -336,7 +336,7 @@ vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwsub_vv_i32m4(op1, op2, vl); + return __riscv_vwsub_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4( @@ -345,7 +345,7 @@ vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t 
op2, size_t vl) { - return vwsub_vx_i32m4(op1, op2, vl); + return __riscv_vwsub_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4( @@ -354,7 +354,7 @@ vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwsub_wv_i32m4(op1, op2, vl); + return __riscv_vwsub_wv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4( @@ -363,7 +363,7 @@ vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m4(op1, op2, vl); + return __riscv_vwsub_wx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8( @@ -372,7 +372,7 @@ vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwsub_vv_i32m8(op1, op2, vl); + return __riscv_vwsub_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8( @@ -381,7 +381,7 @@ vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m8(op1, op2, vl); + return __riscv_vwsub_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8( @@ -390,7 +390,7 @@ vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwsub_wv_i32m8(op1, op2, vl); + return __riscv_vwsub_wv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8( @@ -399,7 +399,7 @@ vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m8(op1, op2, vl); + return __riscv_vwsub_wx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1( @@ -408,7 +408,7 @@ vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_vv_i64m1(op1, op2, vl); + return __riscv_vwsub_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1( @@ -417,7 +417,7 @@ vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m1(op1, op2, vl); + return __riscv_vwsub_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1( @@ -426,7 +426,7 @@ vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_wv_i64m1(op1, op2, vl); + return __riscv_vwsub_wv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1( @@ -435,7 +435,7 @@ vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m1(op1, op2, vl); + return __riscv_vwsub_wx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2( @@ -444,7 +444,7 @@ vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwsub_vv_i64m2(op1, op2, vl); + return __riscv_vwsub_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2( @@ -453,7 +453,7 @@ vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m2(op1, op2, vl); + return __riscv_vwsub_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2( @@ -462,7 +462,7 @@ vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwsub_wv_i64m2(op1, op2, vl); + return __riscv_vwsub_wv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2( @@ -471,7 +471,7 @@ vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m2(op1, op2, vl); + return __riscv_vwsub_wx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4( @@ -480,7 +480,7 @@ vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwsub_vv_i64m4(op1, op2, vl); + return __riscv_vwsub_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4( @@ -489,7 +489,7 @@ vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m4(op1, op2, vl); + return __riscv_vwsub_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4( @@ -498,7 +498,7 @@ vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwsub_wv_i64m4(op1, op2, vl); + return __riscv_vwsub_wv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4( @@ -507,7 +507,7 @@ vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m4(op1, op2, vl); + return __riscv_vwsub_wx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8( @@ -516,7 +516,7 @@ vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwsub_vv_i64m8(op1, op2, vl); + return __riscv_vwsub_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8( @@ -525,7 +525,7 @@ vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m8(op1, op2, vl); + return __riscv_vwsub_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8( @@ -534,7 +534,7 @@ vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwsub_wv_i64m8(op1, op2, vl); + return __riscv_vwsub_wv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8( @@ -543,7 +543,7 @@ vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m8(op1, op2, vl); + return __riscv_vwsub_wx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_m( @@ -552,7 +552,7 @@ vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m( @@ -561,7 +561,7 @@ vint16mf4_t 
test_vwsub_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m( @@ -570,7 +570,7 @@ vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_wv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m( @@ -579,7 +579,7 @@ vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m( @@ -588,7 +588,7 @@ vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m( @@ -597,7 +597,7 @@ vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m( @@ -606,7 +606,7 @@ vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, s // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_wv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m( @@ -615,7 +615,7 @@ vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m( @@ -624,7 +624,7 @@ vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m( @@ -633,7 +633,7 @@ vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m( @@ -642,7 +642,7 @@ vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_wv_i16m1_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m( @@ -651,7 +651,7 @@ vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, 
int8_t op2, size_t vl) { - return vwsub_wx_i16m1_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m( @@ -660,7 +660,7 @@ vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwsub_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m( @@ -669,7 +669,7 @@ vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m( @@ -678,7 +678,7 @@ vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwsub_wv_i16m2_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m( @@ -687,7 +687,7 @@ vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m2_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m( @@ -696,7 +696,7 @@ vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwsub_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i16m4_m(mask, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m( @@ -705,7 +705,7 @@ vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m( @@ -714,7 +714,7 @@ vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwsub_wv_i16m4_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m( @@ -723,7 +723,7 @@ vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m4_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m( @@ -732,7 +732,7 @@ vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwsub_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m( @@ -741,7 +741,7 @@ vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m( @@ -750,7 +750,7 @@ vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t 
mask, vint8m4_t op1, int8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwsub_wv_i16m8_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m( @@ -759,7 +759,7 @@ vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m8_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m( @@ -768,7 +768,7 @@ vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m( @@ -777,7 +777,7 @@ vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m( @@ -786,7 +786,7 @@ vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_wv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m( @@ -795,7 +795,7 @@ vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m( @@ -804,7 +804,7 @@ vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m( @@ -813,7 +813,7 @@ vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m( @@ -822,7 +822,7 @@ vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_wv_i32m1_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m( @@ -831,7 +831,7 @@ vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m1_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m( @@ -840,7 +840,7 @@ vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return 
vwsub_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m( @@ -849,7 +849,7 @@ vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m( @@ -858,7 +858,7 @@ vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwsub_wv_i32m2_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m( @@ -867,7 +867,7 @@ vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m2_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m( @@ -876,7 +876,7 @@ vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwsub_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m( @@ -885,7 +885,7 @@ vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i32m4_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m( @@ -894,7 +894,7 @@ vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwsub_wv_i32m4_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m( @@ -903,7 +903,7 @@ vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m4_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m( @@ -912,7 +912,7 @@ vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwsub_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m( @@ -921,7 +921,7 @@ vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m( @@ -930,7 +930,7 @@ vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwsub_wv_i32m8_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m( @@ -939,7 +939,7 @@ vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, 
vint32m8_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m8_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m( @@ -948,7 +948,7 @@ vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m( @@ -957,7 +957,7 @@ vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m( @@ -966,7 +966,7 @@ vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_wv_i64m1_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m( @@ -975,7 +975,7 @@ vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m1_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m( @@ -984,7 +984,7 @@ vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vwsub_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwsub_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m( @@ -993,7 +993,7 @@ vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m( @@ -1002,7 +1002,7 @@ vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwsub_wv_i64m2_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m( @@ -1011,7 +1011,7 @@ vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m2_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m( @@ -1020,7 +1020,7 @@ vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwsub_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m( @@ -1029,7 +1029,7 @@ vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return 
vwsub_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m( @@ -1038,7 +1038,7 @@ vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwsub_wv_i64m4_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m( @@ -1047,7 +1047,7 @@ vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m4_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m( @@ -1056,7 +1056,7 @@ vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwsub_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vwsub_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m( @@ -1065,7 +1065,7 @@ vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vwsub_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m( @@ -1074,7 +1074,7 @@ vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwsub_wv_i64m8_m(mask, op1, op2, vl); + return __riscv_vwsub_wv_i64m8_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m( @@ -1083,6 +1083,6 @@ vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m8_m(mask, op1, op2, vl); + return __riscv_vwsub_wx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsubu.c index 4daa6d6ed6f3..0d3464570f13 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsubu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_vv_u16mf4(op1, op2, vl); + return __riscv_vwsubu_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4( @@ -21,7 +21,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf4(op1, op2, vl); + return __riscv_vwsubu_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4( @@ -30,7 +30,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_wv_u16mf4(op1, op2, vl); + return __riscv_vwsubu_wv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4( @@ -39,7 +39,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { - return 
vwsubu_wx_u16mf4(op1, op2, vl); + return __riscv_vwsubu_wx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2( @@ -48,7 +48,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_vv_u16mf2(op1, op2, vl); + return __riscv_vwsubu_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2( @@ -57,7 +57,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf2(op1, op2, vl); + return __riscv_vwsubu_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2( @@ -66,7 +66,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_wv_u16mf2(op1, op2, vl); + return __riscv_vwsubu_wv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2( @@ -75,7 +75,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16mf2(op1, op2, vl); + return __riscv_vwsubu_wx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1( @@ -84,7 +84,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_vv_u16m1(op1, op2, vl); + return __riscv_vwsubu_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1( @@ -93,7 +93,7 @@ vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m1(op1, op2, vl); + return __riscv_vwsubu_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1( @@ -102,7 +102,7 @@ vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_wv_u16m1(op1, op2, vl); + return __riscv_vwsubu_wv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1( @@ -111,7 +111,7 @@ vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m1(op1, op2, vl); + return __riscv_vwsubu_wx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2( @@ -120,7 +120,7 @@ vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_vv_u16m2(op1, op2, vl); + return __riscv_vwsubu_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2( @@ -129,7 +129,7 @@ vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m2(op1, op2, vl); + return __riscv_vwsubu_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2( @@ -138,7 +138,7 @@ vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_wv_u16m2(op1, op2, vl); + return __riscv_vwsubu_wv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2( @@ -147,7 +147,7 @@ vuint16m2_t 
test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m2(op1, op2, vl); + return __riscv_vwsubu_wx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4( @@ -156,7 +156,7 @@ vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_vv_u16m4(op1, op2, vl); + return __riscv_vwsubu_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4( @@ -165,7 +165,7 @@ vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m4(op1, op2, vl); + return __riscv_vwsubu_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4( @@ -174,7 +174,7 @@ vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_wv_u16m4(op1, op2, vl); + return __riscv_vwsubu_wv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4( @@ -183,7 +183,7 @@ vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m4(op1, op2, vl); + return __riscv_vwsubu_wx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8( @@ -192,7 +192,7 @@ vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_vv_u16m8(op1, op2, vl); + return __riscv_vwsubu_vv_u16m8(op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8( @@ -201,7 +201,7 @@ vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m8(op1, op2, vl); + return __riscv_vwsubu_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8( @@ -210,7 +210,7 @@ vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_wv_u16m8(op1, op2, vl); + return __riscv_vwsubu_wv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8( @@ -219,7 +219,7 @@ vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m8(op1, op2, vl); + return __riscv_vwsubu_wx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2( @@ -228,7 +228,7 @@ vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_vv_u32mf2(op1, op2, vl); + return __riscv_vwsubu_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2( @@ -237,7 +237,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32mf2(op1, op2, vl); + return __riscv_vwsubu_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2( @@ -246,7 +246,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) 
{ - return vwsubu_wv_u32mf2(op1, op2, vl); + return __riscv_vwsubu_wv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32mf2(op1, op2, vl); + return __riscv_vwsubu_wx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_vv_u32m1(op1, op2, vl); + return __riscv_vwsubu_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m1(op1, op2, vl); + return __riscv_vwsubu_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1( @@ -282,7 +282,7 @@ vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_wv_u32m1(op1, op2, vl); + return __riscv_vwsubu_wv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1( @@ -291,7 +291,7 @@ vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m1(op1, op2, vl); + return __riscv_vwsubu_wx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2( @@ -300,7 +300,7 @@ vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_vv_u32m2(op1, op2, vl); + return __riscv_vwsubu_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2( @@ -309,7 +309,7 @@ vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m2(op1, op2, vl); + return __riscv_vwsubu_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2( @@ -318,7 +318,7 @@ vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_wv_u32m2(op1, op2, vl); + return __riscv_vwsubu_wv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2( @@ -327,7 +327,7 @@ vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m2(op1, op2, vl); + return __riscv_vwsubu_wx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4( @@ -336,7 +336,7 @@ vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_vv_u32m4(op1, op2, vl); + return __riscv_vwsubu_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4( @@ -345,7 +345,7 @@ vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m4(op1, op2, vl); + return __riscv_vwsubu_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4( @@ -354,7 +354,7 
@@ vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_wv_u32m4(op1, op2, vl); + return __riscv_vwsubu_wv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4( @@ -363,7 +363,7 @@ vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m4(op1, op2, vl); + return __riscv_vwsubu_wx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8( @@ -372,7 +372,7 @@ vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_vv_u32m8(op1, op2, vl); + return __riscv_vwsubu_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8( @@ -381,7 +381,7 @@ vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m8(op1, op2, vl); + return __riscv_vwsubu_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8( @@ -390,7 +390,7 @@ vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_wv_u32m8(op1, op2, vl); + return __riscv_vwsubu_wv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8( @@ -399,7 +399,7 @@ vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m8(op1, op2, vl); + return 
__riscv_vwsubu_wx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1( @@ -408,7 +408,7 @@ vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_vv_u64m1(op1, op2, vl); + return __riscv_vwsubu_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1( @@ -417,7 +417,7 @@ vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m1(op1, op2, vl); + return __riscv_vwsubu_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1( @@ -426,7 +426,7 @@ vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_wv_u64m1(op1, op2, vl); + return __riscv_vwsubu_wv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1( @@ -435,7 +435,7 @@ vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m1(op1, op2, vl); + return __riscv_vwsubu_wx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2( @@ -444,7 +444,7 @@ vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_vv_u64m2(op1, op2, vl); + return __riscv_vwsubu_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2( @@ -453,7 +453,7 @@ vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m2(op1, op2, vl); + return __riscv_vwsubu_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2( @@ -462,7 +462,7 @@ vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_wv_u64m2(op1, op2, vl); + return __riscv_vwsubu_wv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2( @@ -471,7 +471,7 @@ vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m2(op1, op2, vl); + return __riscv_vwsubu_wx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4( @@ -480,7 +480,7 @@ vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_vv_u64m4(op1, op2, vl); + return __riscv_vwsubu_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4( @@ -489,7 +489,7 @@ vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m4(op1, op2, vl); + return __riscv_vwsubu_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4( @@ -498,7 +498,7 @@ vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_wv_u64m4(op1, op2, vl); + return __riscv_vwsubu_wv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4( @@ -507,7 +507,7 @@ vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t 
op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m4(op1, op2, vl); + return __riscv_vwsubu_wx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8( @@ -516,7 +516,7 @@ vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_vv_u64m8(op1, op2, vl); + return __riscv_vwsubu_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8( @@ -525,7 +525,7 @@ vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m8(op1, op2, vl); + return __riscv_vwsubu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8( @@ -534,7 +534,7 @@ vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_wv_u64m8(op1, op2, vl); + return __riscv_vwsubu_wv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8( @@ -543,7 +543,7 @@ vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m8(op1, op2, vl); + return __riscv_vwsubu_wx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m( @@ -552,7 +552,7 @@ vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u16mf4_m(mask, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m( @@ -561,7 +561,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m( @@ -570,7 +570,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_wv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m( @@ -579,7 +579,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m( @@ -588,7 +588,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m( @@ -597,7 +597,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwsubu_wv_u16mf2_m( @@ -606,7 +606,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_wv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m( @@ -615,7 +615,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m( @@ -624,7 +624,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m( @@ -633,7 +633,7 @@ vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m( @@ -642,7 +642,7 @@ vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_wv_u16m1_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m( @@ -651,7 +651,7 @@ vuint16m1_t 
test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m1_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m( @@ -660,7 +660,7 @@ vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m( @@ -669,7 +669,7 @@ vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m( @@ -678,7 +678,7 @@ vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_wv_u16m2_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m( @@ -687,7 +687,7 @@ vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m2_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m( @@ -696,7 +696,7 @@ vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m( @@ -705,7 +705,7 @@ vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m( @@ -714,7 +714,7 @@ vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_wv_u16m4_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m( @@ -723,7 +723,7 @@ vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m4_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m( @@ -732,7 +732,7 @@ vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m( @@ -741,7 +741,7 @@ vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, 
vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m( @@ -750,7 +750,7 @@ vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_wv_u16m8_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m( @@ -759,7 +759,7 @@ vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m8_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m( @@ -768,7 +768,7 @@ vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m( @@ -777,7 +777,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m( @@ -786,7 +786,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return 
vwsubu_wv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m( @@ -795,7 +795,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m( @@ -804,7 +804,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m( @@ -813,7 +813,7 @@ vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m( @@ -822,7 +822,7 @@ vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_wv_u32m1_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m( @@ -831,7 +831,7 @@ vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m1_m(mask, op1, op2, vl); + return 
__riscv_vwsubu_wx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m( @@ -840,7 +840,7 @@ vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m( @@ -849,7 +849,7 @@ vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m( @@ -858,7 +858,7 @@ vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_wv_u32m2_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m( @@ -867,7 +867,7 @@ vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m2_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m( @@ -876,7 +876,7 @@ vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwsubu_vx_u32m4_m( @@ -885,7 +885,7 @@ vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m( @@ -894,7 +894,7 @@ vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_wv_u32m4_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m( @@ -903,7 +903,7 @@ vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m4_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m( @@ -912,7 +912,7 @@ vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m( @@ -921,7 +921,7 @@ vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m( @@ -930,7 +930,7 @@ vuint32m8_t 
test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_wv_u32m8_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m( @@ -939,7 +939,7 @@ vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m8_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m( @@ -948,7 +948,7 @@ vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m( @@ -957,7 +957,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m( @@ -966,7 +966,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_wv_u64m1_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m( @@ -975,7 +975,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m1_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m( @@ -984,7 +984,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m( @@ -993,7 +993,7 @@ vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m( @@ -1002,7 +1002,7 @@ vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_wv_u64m2_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m( @@ -1011,7 +1011,7 @@ vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m2_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m( @@ -1020,7 +1020,7 @@ vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m( @@ -1029,7 +1029,7 @@ vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m( @@ -1038,7 +1038,7 @@ vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_wv_u64m4_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m( @@ -1047,7 +1047,7 @@ vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m4_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m( @@ -1056,7 +1056,7 @@ vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vwsubu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m( @@ -1065,7 +1065,7 @@ vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t 
op2, size_t vl) { - return vwsubu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vwsubu_vx_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m( @@ -1074,7 +1074,7 @@ vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_wv_u64m8_m(mask, op1, op2, vl); + return __riscv_vwsubu_wv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m( @@ -1083,6 +1083,6 @@ vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m8_m(mask, op1, op2, vl); + return __riscv_vwsubu_wx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vxor.c index 625ddca30666..7840eae315d9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vxor.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vxor_vv_i8mf8(op1, op2, vl); + return __riscv_vxor_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf8(op1, op2, vl); + return __riscv_vxor_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vxor_vv_i8mf4(op1, op2, vl); + return __riscv_vxor_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf4(op1, op2, vl); + return __riscv_vxor_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vxor_vv_i8mf2(op1, op2, vl); + return __riscv_vxor_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf2(op1, op2, vl); + return __riscv_vxor_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vxor_vv_i8m1(op1, op2, vl); + return __riscv_vxor_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m1(op1, op2, vl); + return __riscv_vxor_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vxor_vv_i8m2(op1, op2, vl); + return __riscv_vxor_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m2(op1, op2, vl); + return __riscv_vxor_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vxor_vv_i8m4(op1, op2, vl); + return __riscv_vxor_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m4(op1, op2, vl); + return __riscv_vxor_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vxor_vv_i8m8(op1, op2, vl); + return __riscv_vxor_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m8(op1, op2, vl); + return __riscv_vxor_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return 
vxor_vv_i16mf4(op1, op2, vl); + return __riscv_vxor_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf4(op1, op2, vl); + return __riscv_vxor_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vxor_vv_i16mf2(op1, op2, vl); + return __riscv_vxor_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf2(op1, op2, vl); + return __riscv_vxor_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vxor_vv_i16m1(op1, op2, vl); + return __riscv_vxor_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m1(op1, op2, vl); + return __riscv_vxor_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, 
size_t vl) { - return vxor_vv_i16m2(op1, op2, vl); + return __riscv_vxor_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m2(op1, op2, vl); + return __riscv_vxor_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vxor_vv_i16m4(op1, op2, vl); + return __riscv_vxor_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m4(op1, op2, vl); + return __riscv_vxor_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vxor_vv_i16m8(op1, op2, vl); + return __riscv_vxor_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m8(op1, op2, vl); + return __riscv_vxor_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t 
vl) { - return vxor_vv_i32mf2(op1, op2, vl); + return __riscv_vxor_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32mf2(op1, op2, vl); + return __riscv_vxor_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vxor_vv_i32m1(op1, op2, vl); + return __riscv_vxor_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m1(op1, op2, vl); + return __riscv_vxor_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vxor_vv_i32m2(op1, op2, vl); + return __riscv_vxor_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m2(op1, op2, vl); + return __riscv_vxor_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, 
size_t vl) { - return vxor_vv_i32m4(op1, op2, vl); + return __riscv_vxor_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m4(op1, op2, vl); + return __riscv_vxor_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vxor_vv_i32m8(op1, op2, vl); + return __riscv_vxor_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m8(op1, op2, vl); + return __riscv_vxor_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m1( @@ -336,7 +336,7 @@ vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vxor_vv_i64m1(op1, op2, vl); + return __riscv_vxor_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m1( @@ -345,7 +345,7 @@ vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m1(op1, op2, vl); + return __riscv_vxor_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m2( @@ -354,7 +354,7 @@ vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { 
- return vxor_vv_i64m2(op1, op2, vl); + return __riscv_vxor_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m2( @@ -363,7 +363,7 @@ vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m2(op1, op2, vl); + return __riscv_vxor_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m4( @@ -372,7 +372,7 @@ vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vxor_vv_i64m4(op1, op2, vl); + return __riscv_vxor_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m4( @@ -381,7 +381,7 @@ vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m4(op1, op2, vl); + return __riscv_vxor_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m8( @@ -390,7 +390,7 @@ vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vxor_vv_i64m8(op1, op2, vl); + return __riscv_vxor_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m8( @@ -399,7 +399,7 @@ vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m8(op1, op2, vl); + return __riscv_vxor_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8( @@ -408,7 +408,7 @@ vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return 
vxor_vv_u8mf8(op1, op2, vl); + return __riscv_vxor_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8( @@ -417,7 +417,7 @@ vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf8(op1, op2, vl); + return __riscv_vxor_vx_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4( @@ -426,7 +426,7 @@ vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vxor_vv_u8mf4(op1, op2, vl); + return __riscv_vxor_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4( @@ -435,7 +435,7 @@ vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf4(op1, op2, vl); + return __riscv_vxor_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2( @@ -444,7 +444,7 @@ vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vxor_vv_u8mf2(op1, op2, vl); + return __riscv_vxor_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2( @@ -453,7 +453,7 @@ vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf2(op1, op2, vl); + return __riscv_vxor_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m1( @@ -462,7 +462,7 @@ vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) 
{ - return vxor_vv_u8m1(op1, op2, vl); + return __riscv_vxor_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m1( @@ -471,7 +471,7 @@ vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m1(op1, op2, vl); + return __riscv_vxor_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m2( @@ -480,7 +480,7 @@ vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vxor_vv_u8m2(op1, op2, vl); + return __riscv_vxor_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m2( @@ -489,7 +489,7 @@ vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m2(op1, op2, vl); + return __riscv_vxor_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m4( @@ -498,7 +498,7 @@ vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vxor_vv_u8m4(op1, op2, vl); + return __riscv_vxor_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m4( @@ -507,7 +507,7 @@ vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m4(op1, op2, vl); + return __riscv_vxor_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m8( @@ -516,7 +516,7 @@ vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vxor_vv_u8m8(op1, op2, vl); + 
return __riscv_vxor_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m8( @@ -525,7 +525,7 @@ vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m8(op1, op2, vl); + return __riscv_vxor_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4( @@ -534,7 +534,7 @@ vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vxor_vv_u16mf4(op1, op2, vl); + return __riscv_vxor_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4( @@ -543,7 +543,7 @@ vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf4(op1, op2, vl); + return __riscv_vxor_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2( @@ -552,7 +552,7 @@ vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vxor_vv_u16mf2(op1, op2, vl); + return __riscv_vxor_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2( @@ -561,7 +561,7 @@ vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf2(op1, op2, vl); + return __riscv_vxor_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m1( @@ -570,7 +570,7 @@ vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) 
{ - return vxor_vv_u16m1(op1, op2, vl); + return __riscv_vxor_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m1( @@ -579,7 +579,7 @@ vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m1(op1, op2, vl); + return __riscv_vxor_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m2( @@ -588,7 +588,7 @@ vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vxor_vv_u16m2(op1, op2, vl); + return __riscv_vxor_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m2( @@ -597,7 +597,7 @@ vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m2(op1, op2, vl); + return __riscv_vxor_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m4( @@ -606,7 +606,7 @@ vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vxor_vv_u16m4(op1, op2, vl); + return __riscv_vxor_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m4( @@ -615,7 +615,7 @@ vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m4(op1, op2, vl); + return __riscv_vxor_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m8( @@ -624,7 +624,7 @@ vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, 
vuint16m8_t op2, size_t vl) { - return vxor_vv_u16m8(op1, op2, vl); + return __riscv_vxor_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m8( @@ -633,7 +633,7 @@ vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m8(op1, op2, vl); + return __riscv_vxor_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2( @@ -642,7 +642,7 @@ vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vxor_vv_u32mf2(op1, op2, vl); + return __riscv_vxor_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2( @@ -651,7 +651,7 @@ vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32mf2(op1, op2, vl); + return __riscv_vxor_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m1( @@ -660,7 +660,7 @@ vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vxor_vv_u32m1(op1, op2, vl); + return __riscv_vxor_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m1( @@ -669,7 +669,7 @@ vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m1(op1, op2, vl); + return __riscv_vxor_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m2( @@ -678,7 +678,7 @@ vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vxor_vv_u32m2(op1, op2, vl); + return __riscv_vxor_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m2( @@ -687,7 +687,7 @@ vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m2(op1, op2, vl); + return __riscv_vxor_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m4( @@ -696,7 +696,7 @@ vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vxor_vv_u32m4(op1, op2, vl); + return __riscv_vxor_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m4( @@ -705,7 +705,7 @@ vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m4(op1, op2, vl); + return __riscv_vxor_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m8( @@ -714,7 +714,7 @@ vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vxor_vv_u32m8(op1, op2, vl); + return __riscv_vxor_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m8( @@ -723,7 +723,7 @@ vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m8(op1, op2, vl); + return __riscv_vxor_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m1( @@ -732,7 +732,7 @@ vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vxor_vv_u64m1(op1, op2, vl); + return __riscv_vxor_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m1( @@ -741,7 +741,7 @@ vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m1(op1, op2, vl); + return __riscv_vxor_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m2( @@ -750,7 +750,7 @@ vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vxor_vv_u64m2(op1, op2, vl); + return __riscv_vxor_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m2( @@ -759,7 +759,7 @@ vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m2(op1, op2, vl); + return __riscv_vxor_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m4( @@ -768,7 +768,7 @@ vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vxor_vv_u64m4(op1, op2, vl); + return __riscv_vxor_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m4( @@ -777,7 +777,7 @@ vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m4(op1, op2, vl); + return __riscv_vxor_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m8( @@ -786,7 +786,7 @@ vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, 
uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vxor_vv_u64m8(op1, op2, vl); + return __riscv_vxor_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m8( @@ -795,7 +795,7 @@ vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m8(op1, op2, vl); + return __riscv_vxor_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m( @@ -804,7 +804,7 @@ vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vxor_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m( @@ -813,7 +813,7 @@ vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m( @@ -822,7 +822,7 @@ vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vxor_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m( @@ -831,7 +831,7 @@ vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf4_m(mask, op1, op2, vl); + 
return __riscv_vxor_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m( @@ -840,7 +840,7 @@ vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vxor_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m( @@ -849,7 +849,7 @@ vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m( @@ -858,7 +858,7 @@ vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vxor_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m( @@ -867,7 +867,7 @@ vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m( @@ -876,7 +876,7 @@ vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vxor_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m( @@ -885,7 +885,7 @@ vint8m2_t 
test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m( @@ -894,7 +894,7 @@ vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vxor_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m( @@ -903,7 +903,7 @@ vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m( @@ -912,7 +912,7 @@ vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vxor_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m( @@ -921,7 +921,7 @@ vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m( @@ -930,7 +930,7 @@ vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, 
vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vxor_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m( @@ -939,7 +939,7 @@ vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m( @@ -948,7 +948,7 @@ vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vxor_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m( @@ -957,7 +957,7 @@ vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m( @@ -966,7 +966,7 @@ vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vxor_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m( @@ -975,7 +975,7 @@ vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m1_m(mask, op1, op2, vl); + return 
__riscv_vxor_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m( @@ -984,7 +984,7 @@ vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vxor_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m( @@ -993,7 +993,7 @@ vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m( @@ -1002,7 +1002,7 @@ vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vxor_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m( @@ -1011,7 +1011,7 @@ vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m( @@ -1020,7 +1020,7 @@ vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vxor_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m( @@ -1029,7 +1029,7 @@ vint16m8_t 
test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m( @@ -1038,7 +1038,7 @@ vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vxor_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vxor_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m( @@ -1065,7 +1065,7 @@ vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m( @@ -1074,7 +1074,7 @@ vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vxor_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m( @@ -1083,7 +1083,7 @@ vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m( @@ -1092,7 +1092,7 @@ vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vxor_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m( @@ -1101,7 +1101,7 @@ vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m( @@ -1110,7 +1110,7 @@ vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vxor_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m( @@ -1119,7 +1119,7 @@ vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m8_m(mask, 
op1, op2, vl); + return __riscv_vxor_vx_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m( @@ -1128,7 +1128,7 @@ vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vxor_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m( @@ -1137,7 +1137,7 @@ vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m( @@ -1146,7 +1146,7 @@ vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vxor_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m( @@ -1155,7 +1155,7 @@ vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m( @@ -1164,7 +1164,7 @@ vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vxor_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m( @@ 
-1173,7 +1173,7 @@ vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m( @@ -1182,7 +1182,7 @@ vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vxor_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vxor_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m( @@ -1191,7 +1191,7 @@ vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vxor_vx_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m( @@ -1200,7 +1200,7 @@ vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vxor_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, s // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vxor_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vxor_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vxor_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, 
size_t vl) { - return vxor_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vxor_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vxor_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vxor_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vxor_vx_u8m8_m( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vxor_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vxor_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m( @@ -1362,7 +1362,7 @@ vuint16mf2_t 
test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vxor_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vxor_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vxor_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vxor_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vxor_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) 
{ - return vxor_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vxor_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vxor_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u32m4_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vxor_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vxor_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vxor_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m( @@ -1551,7 +1551,7 @@ vuint64m2_t 
test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vxor_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vxor_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vxor_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vxor_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext.c index 57d951da8a92..a7bef5c40c34 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) { - return vzext_vf2_u16mf4(op1, vl); + return __riscv_vzext_vf2_u16mf4(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2( @@ -21,7 +21,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) { - return vzext_vf2_u16mf2(op1, vl); + return __riscv_vzext_vf2_u16mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1( @@ -30,7 +30,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) { - return vzext_vf2_u16m1(op1, vl); + return __riscv_vzext_vf2_u16m1(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2( @@ -39,7 +39,7 @@ vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) { - return vzext_vf2_u16m2(op1, vl); + return __riscv_vzext_vf2_u16m2(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4( @@ -48,7 +48,7 @@ vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) { - return vzext_vf2_u16m4(op1, vl); + return __riscv_vzext_vf2_u16m4(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8( @@ -57,7 +57,7 @@ vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) { - return vzext_vf2_u16m8(op1, vl); + 
return __riscv_vzext_vf2_u16m8(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2( @@ -66,7 +66,7 @@ vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) { - return vzext_vf4_u32mf2(op1, vl); + return __riscv_vzext_vf4_u32mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1( @@ -75,7 +75,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) { - return vzext_vf4_u32m1(op1, vl); + return __riscv_vzext_vf4_u32m1(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2( @@ -84,7 +84,7 @@ vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) { - return vzext_vf4_u32m2(op1, vl); + return __riscv_vzext_vf4_u32m2(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4( @@ -93,7 +93,7 @@ vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) { - return vzext_vf4_u32m4(op1, vl); + return __riscv_vzext_vf4_u32m4(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8( @@ -102,7 +102,7 @@ vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) { - return vzext_vf4_u32m8(op1, vl); + return __riscv_vzext_vf4_u32m8(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1( @@ -111,7 +111,7 @@ vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) { - return vzext_vf8_u64m1(op1, vl); + return __riscv_vzext_vf8_u64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2( @@ -120,7 +120,7 @@ vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) { 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) { - return vzext_vf8_u64m2(op1, vl); + return __riscv_vzext_vf8_u64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4( @@ -129,7 +129,7 @@ vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) { - return vzext_vf8_u64m4(op1, vl); + return __riscv_vzext_vf8_u64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8( @@ -138,7 +138,7 @@ vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) { - return vzext_vf8_u64m8(op1, vl); + return __riscv_vzext_vf8_u64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2( @@ -147,7 +147,7 @@ vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) { - return vzext_vf2_u32mf2(op1, vl); + return __riscv_vzext_vf2_u32mf2(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1( @@ -156,7 +156,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) { - return vzext_vf2_u32m1(op1, vl); + return __riscv_vzext_vf2_u32m1(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2( @@ -165,7 +165,7 @@ vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) { - return vzext_vf2_u32m2(op1, vl); + return __riscv_vzext_vf2_u32m2(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4( @@ -174,7 +174,7 @@ vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) { - return vzext_vf2_u32m4(op1, vl); + return 
__riscv_vzext_vf2_u32m4(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8( @@ -183,7 +183,7 @@ vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) { - return vzext_vf2_u32m8(op1, vl); + return __riscv_vzext_vf2_u32m8(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1( @@ -192,7 +192,7 @@ vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) { - return vzext_vf4_u64m1(op1, vl); + return __riscv_vzext_vf4_u64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2( @@ -201,7 +201,7 @@ vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) { - return vzext_vf4_u64m2(op1, vl); + return __riscv_vzext_vf4_u64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4( @@ -210,7 +210,7 @@ vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) { - return vzext_vf4_u64m4(op1, vl); + return __riscv_vzext_vf4_u64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8( @@ -219,7 +219,7 @@ vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) { - return vzext_vf4_u64m8(op1, vl); + return __riscv_vzext_vf4_u64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1( @@ -228,7 +228,7 @@ vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) { - return vzext_vf2_u64m1(op1, vl); + return __riscv_vzext_vf2_u64m1(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2( @@ -237,7 +237,7 @@ vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t 
vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) { - return vzext_vf2_u64m2(op1, vl); + return __riscv_vzext_vf2_u64m2(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4( @@ -246,7 +246,7 @@ vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) { - return vzext_vf2_u64m4(op1, vl); + return __riscv_vzext_vf2_u64m4(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8( @@ -255,7 +255,7 @@ vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) { - return vzext_vf2_u64m8(op1, vl); + return __riscv_vzext_vf2_u64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_m( @@ -264,7 +264,7 @@ vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { - return vzext_vf2_u16mf4_m(mask, op1, vl); + return __riscv_vzext_vf2_u16mf4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m( @@ -273,7 +273,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { - return vzext_vf2_u16mf2_m(mask, op1, vl); + return __riscv_vzext_vf2_u16mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m( @@ -282,7 +282,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { - return vzext_vf2_u16m1_m(mask, op1, vl); + return __riscv_vzext_vf2_u16m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m( @@ -291,7 +291,7 @@ vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint8mf2_t 
op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { - return vzext_vf2_u16m2_m(mask, op1, vl); + return __riscv_vzext_vf2_u16m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m( @@ -300,7 +300,7 @@ vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { - return vzext_vf2_u16m4_m(mask, op1, vl); + return __riscv_vzext_vf2_u16m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m( @@ -309,7 +309,7 @@ vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint8m4_t op1, size_t vl) { - return vzext_vf2_u16m8_m(mask, op1, vl); + return __riscv_vzext_vf2_u16m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m( @@ -318,7 +318,7 @@ vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint8m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { - return vzext_vf4_u32mf2_m(mask, op1, vl); + return __riscv_vzext_vf4_u32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m( @@ -327,7 +327,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { - return vzext_vf4_u32m1_m(mask, op1, vl); + return __riscv_vzext_vf4_u32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m( @@ -336,7 +336,7 @@ vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { - return vzext_vf4_u32m2_m(mask, op1, vl); + return 
__riscv_vzext_vf4_u32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m( @@ -345,7 +345,7 @@ vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { - return vzext_vf4_u32m4_m(mask, op1, vl); + return __riscv_vzext_vf4_u32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m( @@ -354,7 +354,7 @@ vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { - return vzext_vf4_u32m8_m(mask, op1, vl); + return __riscv_vzext_vf4_u32m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m( @@ -363,7 +363,7 @@ vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { - return vzext_vf8_u64m1_m(mask, op1, vl); + return __riscv_vzext_vf8_u64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m( @@ -372,7 +372,7 @@ vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { - return vzext_vf8_u64m2_m(mask, op1, vl); + return __riscv_vzext_vf8_u64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m( @@ -381,7 +381,7 @@ vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { - return vzext_vf8_u64m4_m(mask, op1, vl); + return __riscv_vzext_vf8_u64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m( @@ -390,7 +390,7 @@ vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { - return vzext_vf8_u64m8_m(mask, op1, vl); + return __riscv_vzext_vf8_u64m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m( @@ -399,7 +399,7 @@ vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { - return vzext_vf2_u32mf2_m(mask, op1, vl); + return __riscv_vzext_vf2_u32mf2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m( @@ -408,7 +408,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { - return vzext_vf2_u32m1_m(mask, op1, vl); + return __riscv_vzext_vf2_u32m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m( @@ -417,7 +417,7 @@ vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { - return vzext_vf2_u32m2_m(mask, op1, vl); + return __riscv_vzext_vf2_u32m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m( @@ -426,7 +426,7 @@ vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { - return vzext_vf2_u32m4_m(mask, op1, vl); + return __riscv_vzext_vf2_u32m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m( @@ -435,7 +435,7 @@ vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint16m4_t op1, size_t vl) { - return vzext_vf2_u32m8_m(mask, op1, vl); + return __riscv_vzext_vf2_u32m8_m(mask, 
op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m( @@ -444,7 +444,7 @@ vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint16m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { - return vzext_vf4_u64m1_m(mask, op1, vl); + return __riscv_vzext_vf4_u64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m( @@ -453,7 +453,7 @@ vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { - return vzext_vf4_u64m2_m(mask, op1, vl); + return __riscv_vzext_vf4_u64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m( @@ -462,7 +462,7 @@ vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { - return vzext_vf4_u64m4_m(mask, op1, vl); + return __riscv_vzext_vf4_u64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m( @@ -471,7 +471,7 @@ vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { - return vzext_vf4_u64m8_m(mask, op1, vl); + return __riscv_vzext_vf4_u64m8_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m( @@ -480,7 +480,7 @@ vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) { - return vzext_vf2_u64m1_m(mask, op1, vl); + return __riscv_vzext_vf2_u64m1_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m( @@ -489,7 +489,7 @@ vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint32m1_t op1, size_t vl) { - return vzext_vf2_u64m2_m(mask, op1, vl); + return __riscv_vzext_vf2_u64m2_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m( @@ -498,7 +498,7 @@ vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint32m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint32m2_t op1, size_t vl) { - return vzext_vf2_u64m4_m(mask, op1, vl); + return __riscv_vzext_vf2_u64m4_m(mask, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m( @@ -507,6 +507,6 @@ vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint32m4_t op1, size_t vl) { - return vzext_vf2_u64m8_m(mask, op1, vl); + return __riscv_vzext_vf2_u64m8_m(mask, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaadd.c index 05ebc5c0c5ca..b49d182a4bc6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaadd.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vaadd_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vaadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vaadd_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vaadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vaadd_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vaadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vaadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vaadd_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vaadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vaadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vaadd_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_tu( 
@@ -75,7 +75,7 @@ vint8m1_t test_vaadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vaadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vaadd_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vaadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vaadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vaadd_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vaadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t 
test_vaadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vaadd_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vaadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vaadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vaadd_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vaadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vaadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vaadd_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_tu( @@ -165,7 +165,7 @@ 
vint16mf2_t test_vaadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vaadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vaadd_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vaadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vaadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vaadd_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vaadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_tu( @@ -210,7 +210,7 @@ 
vint16m2_t test_vaadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vaadd_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vaadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vaadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vaadd_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vaadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vaadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vaadd_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tu( @@ -255,7 +255,7 @@ 
vint32mf2_t test_vaadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vaadd_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vaadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vaadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vaadd_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vaadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_tu( @@ -300,7 +300,7 @@ 
vint32m2_t test_vaadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vaadd_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vaadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vaadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vaadd_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vaadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vaadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vaadd_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_tu( @@ -345,7 +345,7 @@ 
vint64m1_t test_vaadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vaadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vaadd_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vaadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vaadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vaadd_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vaadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t 
test_vaadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vaadd_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vaadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_tum( @@ -408,7 +408,7 @@ vint64m8_t test_vaadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vaadd_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vaadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_tum( @@ -426,7 +426,7 @@ vint8mf8_t test_vaadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vaadd_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8mf4_tum(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_tum( @@ -435,7 +435,7 @@ vint8mf4_t test_vaadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_tum( @@ -444,7 +444,7 @@ vint8mf4_t test_vaadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vaadd_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_tum( @@ -453,7 +453,7 @@ vint8mf2_t test_vaadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_tum( @@ -462,7 +462,7 @@ vint8mf2_t test_vaadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vaadd_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vaadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t 
op2, size_t vl) { - return vaadd_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_tum( @@ -480,7 +480,7 @@ vint8m1_t test_vaadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vaadd_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_tum( @@ -489,7 +489,7 @@ vint8m2_t test_vaadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_tum( @@ -498,7 +498,7 @@ vint8m2_t test_vaadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vaadd_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_tum( @@ -507,7 +507,7 @@ vint8m4_t test_vaadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_tum( @@ -516,7 +516,7 @@ vint8m4_t test_vaadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vaadd_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_tum( @@ -525,7 +525,7 @@ vint8m8_t test_vaadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_tum( @@ -534,7 +534,7 @@ vint8m8_t test_vaadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vaadd_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_tum( @@ -543,7 +543,7 @@ vint16mf4_t test_vaadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_tum( @@ -552,7 +552,7 @@ vint16mf4_t test_vaadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vaadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_tum( @@ -561,7 +561,7 @@ vint16mf2_t test_vaadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_tum( @@ -570,7 +570,7 @@ vint16mf2_t test_vaadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vaadd_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_tum( @@ -579,7 +579,7 @@ vint16m1_t test_vaadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_tum( @@ -588,7 +588,7 @@ vint16m1_t test_vaadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vaadd_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_tum( @@ -597,7 +597,7 @@ vint16m2_t test_vaadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, 
int16_t op2, size_t vl) { - return vaadd_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_tum( @@ -606,7 +606,7 @@ vint16m2_t test_vaadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vaadd_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_tum( @@ -615,7 +615,7 @@ vint16m4_t test_vaadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_tum( @@ -624,7 +624,7 @@ vint16m4_t test_vaadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vaadd_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_tum( @@ -633,7 +633,7 @@ vint16m8_t test_vaadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tum( @@ -642,7 +642,7 @@ vint16m8_t test_vaadd_vx_i16m8_tum(vbool2_t mask, 
vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vaadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tum( @@ -651,7 +651,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_tum( @@ -660,7 +660,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vaadd_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_tum( @@ -669,7 +669,7 @@ vint32m1_t test_vaadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_tum( @@ -678,7 +678,7 @@ vint32m1_t test_vaadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vaadd_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vaadd_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_tum( @@ -687,7 +687,7 @@ vint32m2_t test_vaadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_tum( @@ -696,7 +696,7 @@ vint32m2_t test_vaadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vaadd_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_tum( @@ -705,7 +705,7 @@ vint32m4_t test_vaadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_tum( @@ -714,7 +714,7 @@ vint32m4_t test_vaadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vaadd_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_tum( @@ -723,7 +723,7 @@ vint32m8_t test_vaadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vaadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_tum( @@ -732,7 +732,7 @@ vint32m8_t test_vaadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vaadd_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_tum( @@ -741,7 +741,7 @@ vint64m1_t test_vaadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_tum( @@ -750,7 +750,7 @@ vint64m1_t test_vaadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vaadd_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_tum( @@ -759,7 +759,7 @@ vint64m2_t test_vaadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vaadd_vv_i64m4_tum( @@ -768,7 +768,7 @@ vint64m2_t test_vaadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vaadd_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_tum( @@ -777,7 +777,7 @@ vint64m4_t test_vaadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_tum( @@ -786,7 +786,7 @@ vint64m4_t test_vaadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vaadd_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_tum( @@ -795,7 +795,7 @@ vint64m8_t test_vaadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_tumu( @@ -804,7 +804,7 @@ vint64m8_t test_vaadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t 
vl) { - return vaadd_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vaadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_tumu( @@ -822,7 +822,7 @@ vint8mf8_t test_vaadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vaadd_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_tumu( @@ -831,7 +831,7 @@ vint8mf4_t test_vaadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_tumu( @@ -840,7 +840,7 @@ vint8mf4_t test_vaadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vaadd_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_tumu( @@ -849,7 +849,7 @@ vint8mf2_t test_vaadd_vv_i8mf2_tumu(vbool16_t mask, 
vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_tumu( @@ -858,7 +858,7 @@ vint8mf2_t test_vaadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vaadd_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_tumu( @@ -867,7 +867,7 @@ vint8m1_t test_vaadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_tumu( @@ -876,7 +876,7 @@ vint8m1_t test_vaadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vaadd_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_tumu( @@ -885,7 +885,7 @@ vint8m2_t test_vaadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m2_tumu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_tumu( @@ -894,7 +894,7 @@ vint8m2_t test_vaadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vaadd_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_tumu( @@ -903,7 +903,7 @@ vint8m4_t test_vaadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_tumu( @@ -912,7 +912,7 @@ vint8m4_t test_vaadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vaadd_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_tumu( @@ -921,7 +921,7 @@ vint8m8_t test_vaadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_tumu( @@ -930,7 +930,7 @@ vint8m8_t test_vaadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, 
vint16mf4_t op2, size_t vl) { - return vaadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_tumu( @@ -939,7 +939,7 @@ vint16mf4_t test_vaadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_tumu( @@ -948,7 +948,7 @@ vint16mf4_t test_vaadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vaadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_tumu( @@ -957,7 +957,7 @@ vint16mf2_t test_vaadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_tumu( @@ -966,7 +966,7 @@ vint16mf2_t test_vaadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vaadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_tumu( @@ -975,7 +975,7 @@ 
vint16m1_t test_vaadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_tumu( @@ -984,7 +984,7 @@ vint16m1_t test_vaadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vaadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_tumu( @@ -993,7 +993,7 @@ vint16m2_t test_vaadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_tumu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vaadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vaadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_tumu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vaadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return 
vaadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_tumu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vaadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vaadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_tumu( @@ -1029,7 +1029,7 @@ vint16m8_t test_vaadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_tumu( @@ -1038,7 +1038,7 @@ vint16m8_t test_vaadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vaadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_tumu( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_tumu( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_tumu(vbool64_t 
mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vaadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_tumu( @@ -1065,7 +1065,7 @@ vint32m1_t test_vaadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_tumu( @@ -1074,7 +1074,7 @@ vint32m1_t test_vaadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vaadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_tumu( @@ -1083,7 +1083,7 @@ vint32m2_t test_vaadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_tumu( @@ -1092,7 +1092,7 @@ vint32m2_t test_vaadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vaadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vaadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_tumu( @@ -1101,7 +1101,7 @@ vint32m4_t test_vaadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_tumu( @@ -1110,7 +1110,7 @@ vint32m4_t test_vaadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vaadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_tumu( @@ -1119,7 +1119,7 @@ vint32m8_t test_vaadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_tumu( @@ -1128,7 +1128,7 @@ vint32m8_t test_vaadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vaadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_tumu( @@ -1137,7 +1137,7 @@ vint64m1_t test_vaadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m1_t test_vaadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_tumu( @@ -1146,7 +1146,7 @@ vint64m1_t test_vaadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vaadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_tumu( @@ -1155,7 +1155,7 @@ vint64m2_t test_vaadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_tumu( @@ -1164,7 +1164,7 @@ vint64m2_t test_vaadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vaadd_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_tumu( @@ -1173,7 +1173,7 @@ vint64m4_t test_vaadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_tumu( @@ -1182,7 +1182,7 @@ vint64m4_t test_vaadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vaadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_tumu( @@ -1191,7 +1191,7 @@ vint64m8_t test_vaadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_mu( @@ -1200,7 +1200,7 @@ vint64m8_t test_vaadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vaadd_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vaadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_mu( @@ -1218,7 +1218,7 @@ vint8mf8_t test_vaadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, 
vint8mf4_t op2, size_t vl) { - return vaadd_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_mu( @@ -1227,7 +1227,7 @@ vint8mf4_t test_vaadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_mu( @@ -1236,7 +1236,7 @@ vint8mf4_t test_vaadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vaadd_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_mu( @@ -1245,7 +1245,7 @@ vint8mf2_t test_vaadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_mu( @@ -1254,7 +1254,7 @@ vint8mf2_t test_vaadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vaadd_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_mu( @@ -1263,7 +1263,7 @@ vint8m1_t test_vaadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, 
vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_mu( @@ -1272,7 +1272,7 @@ vint8m1_t test_vaadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vaadd_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_mu( @@ -1281,7 +1281,7 @@ vint8m2_t test_vaadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_mu( @@ -1290,7 +1290,7 @@ vint8m2_t test_vaadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vaadd_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_mu( @@ -1299,7 +1299,7 @@ vint8m4_t test_vaadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vaadd_vv_i8m8_mu( @@ -1308,7 +1308,7 @@ vint8m4_t test_vaadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vaadd_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_mu( @@ -1317,7 +1317,7 @@ vint8m8_t test_vaadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vaadd_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_mu( @@ -1326,7 +1326,7 @@ vint8m8_t test_vaadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vaadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_mu( @@ -1335,7 +1335,7 @@ vint16mf4_t test_vaadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_mu( @@ -1344,7 +1344,7 @@ vint16mf4_t test_vaadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - 
return vaadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_mu( @@ -1353,7 +1353,7 @@ vint16mf2_t test_vaadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_mu( @@ -1362,7 +1362,7 @@ vint16mf2_t test_vaadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vaadd_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_mu( @@ -1371,7 +1371,7 @@ vint16m1_t test_vaadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_mu( @@ -1380,7 +1380,7 @@ vint16m1_t test_vaadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vaadd_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_mu( @@ -1389,7 +1389,7 @@ vint16m2_t test_vaadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_mu( @@ -1398,7 +1398,7 @@ vint16m2_t test_vaadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vaadd_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_mu( @@ -1407,7 +1407,7 @@ vint16m4_t test_vaadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_mu( @@ -1416,7 +1416,7 @@ vint16m4_t test_vaadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vaadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_mu( @@ -1425,7 +1425,7 @@ vint16m8_t test_vaadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vaadd_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_mu( @@ -1434,7 +1434,7 @@ vint16m8_t test_vaadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vaadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_mu( @@ -1443,7 +1443,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_mu( @@ -1452,7 +1452,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vaadd_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_mu( @@ -1461,7 +1461,7 @@ vint32m1_t test_vaadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_mu( @@ -1470,7 +1470,7 @@ vint32m1_t test_vaadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, 
vint32m2_t op2, size_t vl) { - return vaadd_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_mu( @@ -1479,7 +1479,7 @@ vint32m2_t test_vaadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_mu( @@ -1488,7 +1488,7 @@ vint32m2_t test_vaadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vaadd_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_mu( @@ -1497,7 +1497,7 @@ vint32m4_t test_vaadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_mu( @@ -1506,7 +1506,7 @@ vint32m4_t test_vaadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vaadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_mu( @@ -1515,7 +1515,7 @@ vint32m8_t test_vaadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t 
maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vaadd_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_mu( @@ -1524,7 +1524,7 @@ vint32m8_t test_vaadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vaadd_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_mu( @@ -1533,7 +1533,7 @@ vint64m1_t test_vaadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_mu( @@ -1542,7 +1542,7 @@ vint64m1_t test_vaadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vaadd_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_mu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vaadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m2_mu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_mu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vaadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vaadd_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_mu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vaadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_mu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vaadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vaadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_mu( @@ -1587,6 +1587,6 @@ vint64m8_t test_vaadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vaadd_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaadd_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaaddu.c index 24d8118e4c39..8d08d44f9363 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaaddu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vaaddu_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_tu( @@ -30,7 +30,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vaaddu_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_tu( @@ -39,7 +39,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_tu( @@ -48,7 +48,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vaaddu_vv_u8mf2_tu(maskedoff, op1, 
op2, vl); + return __riscv_vaaddu_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_tu( @@ -57,7 +57,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_tu( @@ -66,7 +66,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vaaddu_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vaaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_tu( @@ -84,7 +84,7 @@ vuint8m1_t test_vaaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vaaddu_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_tu( @@ -93,7 +93,7 @@ vuint8m2_t test_vaaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m2_tu(maskedoff, op1, op2, vl); + 
return __riscv_vaaddu_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_tu( @@ -102,7 +102,7 @@ vuint8m2_t test_vaaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vaaddu_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_tu( @@ -111,7 +111,7 @@ vuint8m4_t test_vaaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vaaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vaaddu_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_tu( @@ -129,7 +129,7 @@ vuint8m8_t test_vaaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_tu( @@ -138,7 +138,7 @@ vuint8m8_t test_vaaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vaaddu_vv_u16mf4_tu(maskedoff, op1, op2, 
vl); + return __riscv_vaaddu_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_tu( @@ -147,7 +147,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_tu( @@ -156,7 +156,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vaaddu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_tu( @@ -165,7 +165,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_tu( @@ -174,7 +174,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vaaddu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_tu( @@ -183,7 +183,7 @@ vuint16m1_t test_vaaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) 
{ - return vaaddu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_tu( @@ -192,7 +192,7 @@ vuint16m1_t test_vaaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vaaddu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_tu( @@ -201,7 +201,7 @@ vuint16m2_t test_vaaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_tu( @@ -210,7 +210,7 @@ vuint16m2_t test_vaaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vaaddu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_tu( @@ -219,7 +219,7 @@ vuint16m4_t test_vaaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_tu( @@ -228,7 +228,7 @@ vuint16m4_t test_vaaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8_tu(vuint16m8_t maskedoff, 
vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vaaddu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_tu( @@ -237,7 +237,7 @@ vuint16m8_t test_vaaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tu( @@ -246,7 +246,7 @@ vuint16m8_t test_vaaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vaaddu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tu( @@ -255,7 +255,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_tu( @@ -264,7 +264,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vaaddu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_tu( @@ -273,7 +273,7 @@ vuint32m1_t test_vaaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vaaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_tu( @@ -282,7 +282,7 @@ vuint32m1_t test_vaaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vaaddu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_tu( @@ -291,7 +291,7 @@ vuint32m2_t test_vaaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_tu( @@ -300,7 +300,7 @@ vuint32m2_t test_vaaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vaaddu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_tu( @@ -309,7 +309,7 @@ vuint32m4_t test_vaaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_tu( @@ -318,7 +318,7 @@ vuint32m4_t test_vaaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vaaddu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_tu( @@ -327,7 +327,7 @@ vuint32m8_t test_vaaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_tu( @@ -336,7 +336,7 @@ vuint32m8_t test_vaaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vaaddu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_tu( @@ -345,7 +345,7 @@ vuint64m1_t test_vaaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_tu( @@ -354,7 +354,7 @@ vuint64m1_t test_vaaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vaaddu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_tu( @@ -363,7 +363,7 @@ vuint64m2_t test_vaaddu_vv_u64m2_tu(vuint64m2_t 
maskedoff, vuint64m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_tu( @@ -372,7 +372,7 @@ vuint64m2_t test_vaaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vaaddu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_tu( @@ -381,7 +381,7 @@ vuint64m4_t test_vaaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_tu( @@ -390,7 +390,7 @@ vuint64m4_t test_vaaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vaaddu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m8_t test_vaaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t 
test_vaaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vaaddu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_tum( @@ -417,7 +417,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_tum( @@ -426,7 +426,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vaaddu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_tum( @@ -435,7 +435,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_tum( @@ -444,7 +444,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return 
vaaddu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_tum( @@ -453,7 +453,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_tum( @@ -462,7 +462,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vaaddu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_tum( @@ -471,7 +471,7 @@ vuint8m1_t test_vaaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_tum( @@ -480,7 +480,7 @@ vuint8m1_t test_vaaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vaaddu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_tum( @@ -489,7 +489,7 @@ vuint8m2_t test_vaaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_tum( @@ -498,7 +498,7 @@ vuint8m2_t test_vaaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vaaddu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_tum( @@ -507,7 +507,7 @@ vuint8m4_t test_vaaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_tum( @@ -516,7 +516,7 @@ vuint8m4_t test_vaaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vaaddu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_tum( @@ -525,7 +525,7 @@ vuint8m8_t test_vaaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_tum( @@ -534,7 +534,7 @@ vuint8m8_t test_vaaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vaaddu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_tum( @@ -543,7 +543,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_tum( @@ -552,7 +552,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vaaddu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_tum( @@ -561,7 +561,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_tum( @@ -570,7 +570,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vaaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vaaddu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_tum( @@ -579,7 +579,7 @@ vuint16m1_t test_vaaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_tum( @@ -588,7 +588,7 @@ vuint16m1_t test_vaaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vaaddu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_tum( @@ -597,7 +597,7 @@ vuint16m2_t test_vaaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_tum( @@ -606,7 +606,7 @@ vuint16m2_t test_vaaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vaaddu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_tum( @@ -615,7 +615,7 @@ vuint16m4_t test_vaaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_tum( @@ -624,7 +624,7 @@ vuint16m4_t test_vaaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vaaddu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_tum( @@ -633,7 +633,7 @@ vuint16m8_t test_vaaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tum( @@ -642,7 +642,7 @@ vuint16m8_t test_vaaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vaaddu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tum( @@ -651,7 +651,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2_tum(vbool64_t mask, 
vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_tum( @@ -660,7 +660,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vaaddu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_tum( @@ -669,7 +669,7 @@ vuint32m1_t test_vaaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_tum( @@ -678,7 +678,7 @@ vuint32m1_t test_vaaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vaaddu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_tum( @@ -687,7 +687,7 @@ vuint32m2_t test_vaaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vaaddu_vv_u32m4_tum( @@ -696,7 +696,7 @@ vuint32m2_t test_vaaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vaaddu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_tum( @@ -705,7 +705,7 @@ vuint32m4_t test_vaaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_tum( @@ -714,7 +714,7 @@ vuint32m4_t test_vaaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vaaddu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_tum( @@ -723,7 +723,7 @@ vuint32m8_t test_vaaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_tum( @@ -732,7 +732,7 @@ vuint32m8_t test_vaaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, 
vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vaaddu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_tum( @@ -741,7 +741,7 @@ vuint64m1_t test_vaaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_tum( @@ -750,7 +750,7 @@ vuint64m1_t test_vaaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vaaddu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_tum( @@ -759,7 +759,7 @@ vuint64m2_t test_vaaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_tum( @@ -768,7 +768,7 @@ vuint64m2_t test_vaaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vaaddu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_tum( @@ -777,7 +777,7 
@@ vuint64m4_t test_vaaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_tum( @@ -786,7 +786,7 @@ vuint64m4_t test_vaaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vaaddu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m8_t test_vaaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vaaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vaaddu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_tumu( @@ -813,7 +813,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - 
return vaaddu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_tumu( @@ -822,7 +822,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vaaddu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_tumu( @@ -831,7 +831,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_tumu( @@ -840,7 +840,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vaaddu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_tumu( @@ -849,7 +849,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_tumu( @@ -858,7 +858,7 @@ vuint8mf2_t 
test_vaaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vaaddu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_tumu( @@ -867,7 +867,7 @@ vuint8m1_t test_vaaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_tumu( @@ -876,7 +876,7 @@ vuint8m1_t test_vaaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vaaddu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_tumu( @@ -885,7 +885,7 @@ vuint8m2_t test_vaaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_tumu( @@ -894,7 +894,7 @@ vuint8m2_t test_vaaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vaaddu_vv_u8m4_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_tumu( @@ -903,7 +903,7 @@ vuint8m4_t test_vaaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_tumu( @@ -912,7 +912,7 @@ vuint8m4_t test_vaaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vaaddu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_tumu( @@ -921,7 +921,7 @@ vuint8m8_t test_vaaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_tumu( @@ -930,7 +930,7 @@ vuint8m8_t test_vaaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vaaddu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_tumu( @@ -939,7 +939,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_tumu( @@ -948,7 +948,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vaaddu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_tumu( @@ -957,7 +957,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_tumu( @@ -966,7 +966,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vaaddu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_tumu( @@ -975,7 +975,7 @@ vuint16m1_t test_vaaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m1_tumu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vaaddu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_tumu( @@ -984,7 +984,7 @@ vuint16m1_t test_vaaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vaaddu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_tumu( @@ -993,7 +993,7 @@ vuint16m2_t test_vaaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_tumu( @@ -1002,7 +1002,7 @@ vuint16m2_t test_vaaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vaaddu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_tumu( @@ -1011,7 +1011,7 @@ vuint16m4_t test_vaaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_tumu( @@ -1020,7 +1020,7 @@ vuint16m4_t test_vaaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, 
vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vaaddu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_tumu( @@ -1029,7 +1029,7 @@ vuint16m8_t test_vaaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_tumu( @@ -1038,7 +1038,7 @@ vuint16m8_t test_vaaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vaaddu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_tumu( @@ -1047,7 +1047,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_tumu( @@ -1056,7 +1056,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vaaddu_vv_u32m1_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_tumu( @@ -1065,7 +1065,7 @@ vuint32m1_t test_vaaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_tumu( @@ -1074,7 +1074,7 @@ vuint32m1_t test_vaaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vaaddu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_tumu( @@ -1083,7 +1083,7 @@ vuint32m2_t test_vaaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_tumu( @@ -1092,7 +1092,7 @@ vuint32m2_t test_vaaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vaaddu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_tumu( @@ -1101,7 +1101,7 @@ vuint32m4_t test_vaaddu_vv_u32m4_tumu(vbool8_t mask, 
vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_tumu( @@ -1110,7 +1110,7 @@ vuint32m4_t test_vaaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vaaddu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_tumu( @@ -1119,7 +1119,7 @@ vuint32m8_t test_vaaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_tumu( @@ -1128,7 +1128,7 @@ vuint32m8_t test_vaaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vaaddu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_tumu( @@ -1137,7 +1137,7 @@ vuint64m1_t test_vaaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m1_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_tumu( @@ -1146,7 +1146,7 @@ vuint64m1_t test_vaaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vaaddu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_tumu( @@ -1155,7 +1155,7 @@ vuint64m2_t test_vaaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_tumu( @@ -1164,7 +1164,7 @@ vuint64m2_t test_vaaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vaaddu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_tumu( @@ -1173,7 +1173,7 @@ vuint64m4_t test_vaaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_tumu( @@ -1182,7 +1182,7 @@ vuint64m4_t test_vaaddu_vx_u64m4_tumu(vbool16_t 
mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vaaddu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m8_t test_vaaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t test_vaaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vaaddu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_mu( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_mu( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vaaddu_vv_u8mf4_mu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vaaddu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_mu( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_mu( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vaaddu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_mu( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_mu( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vaaddu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_mu( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vaaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m1_t test_vaaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_mu( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vaaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vaaddu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_mu( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vaaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_mu( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vaaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vaaddu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_mu( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vaaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_mu( @@ 
-1308,7 +1308,7 @@ vuint8m4_t test_vaaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vaaddu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_mu( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vaaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vaaddu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_mu( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vaaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vaaddu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_mu( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_mu( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, 
size_t vl) { - return vaaddu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_mu( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_mu( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vaaddu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_mu( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vaaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_mu( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vaaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vaaddu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_mu( @@ -1389,7 +1389,7 @@ vuint16m2_t 
test_vaaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_mu( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vaaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vaaddu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_mu( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vaaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_mu( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vaaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vaaddu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_mu( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vaaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vaaddu_vx_u16m8_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_mu( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vaaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vaaddu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_mu( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_mu( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vaaddu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_mu( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vaaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_mu( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vaaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t 
maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vaaddu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_mu( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vaaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_mu( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vaaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vaaddu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_mu( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vaaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_mu( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vaaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vaaddu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vaaddu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_mu( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vaaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vaaddu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_mu( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vaaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vaaddu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_mu( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vaaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_mu( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vaaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vaaddu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_mu( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vaaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m2_t test_vaaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_mu( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vaaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vaaddu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_mu( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vaaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_mu( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vaaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vaaddu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vaaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vaaddu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vaaddu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff 
--git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadc.c index 34545aafd987..bf9e264d2193 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadc.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadc_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_i8mf8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8mf8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vadc_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadc_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_i8mf8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8mf8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vadc_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadc_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_i8mf4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8mf4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vadc_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadc_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_i8mf4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8mf4_tu(maskedoff, op1, op2, carryin, vl); } 
// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vadc_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadc_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_i8mf2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8mf2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vadc_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadc_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_i8mf2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8mf2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vadc_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadc_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_i8m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vadc_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadc_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_i8m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vadc_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadc_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, 
size_t vl) { - return vadc_vvm_i8m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vadc_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadc_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl) { - return vadc_vxm_i8m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vadc_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadc_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl) { - return vadc_vvm_i8m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vadc_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadc_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl) { - return vadc_vxm_i8m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vadc_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadc_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl) { - return vadc_vvm_i8m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i8m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vadc_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadc_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl) { - return vadc_vxm_i8m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i8m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vadc_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadc_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_i16mf4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i16mf4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vadc_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadc_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_i16mf4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i16mf4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vadc_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadc_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_i16mf2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i16mf2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vadc_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadc_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_i16mf2_tu(maskedoff, op1, op2, carryin, vl); + return 
__riscv_vadc_vxm_i16mf2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vadc_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadc_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_i16m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i16m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vadc_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadc_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_i16m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i16m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vadc_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadc_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_i16m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i16m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vadc_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadc_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_i16m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i16m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vadc_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vadc_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl) { - return vadc_vvm_i16m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i16m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vadc_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadc_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl) { - return vadc_vxm_i16m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i16m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vadc_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadc_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl) { - return vadc_vvm_i16m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i16m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vadc_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadc_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl) { - return vadc_vxm_i16m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i16m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vadc_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadc_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_i32mf2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i32mf2_tu(maskedoff, op1, op2, carryin, vl); } // 
CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vadc_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadc_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_i32mf2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i32mf2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vadc_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadc_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_i32m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i32m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vadc_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadc_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_i32m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i32m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vadc_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadc_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_i32m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i32m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vadc_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadc_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, 
int32_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_i32m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i32m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vadc_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadc_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_i32m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i32m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vadc_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadc_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_i32m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i32m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vadc_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadc_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl) { - return vadc_vvm_i32m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i32m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vadc_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadc_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl) { - return vadc_vxm_i32m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i32m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t 
test_vadc_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadc_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_i64m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i64m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vadc_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadc_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_i64m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i64m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vadc_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadc_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_i64m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i64m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vadc_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadc_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_i64m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i64m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vadc_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadc_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl) { - return 
vadc_vvm_i64m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i64m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vadc_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadc_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_i64m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i64m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vadc_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadc_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_i64m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_i64m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vadc_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadc_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_i64m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_i64m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8_tu( @@ -408,7 +408,7 @@ vint64m8_t test_vadc_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadc_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_u8mf8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8mf8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8_tu( @@ -417,7 +417,7 @@ vuint8mf8_t test_vadc_vvm_u8mf8_tu(vuint8mf8_t maskedoff, 
vuint8mf8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadc_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_u8mf8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u8mf8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4_tu( @@ -426,7 +426,7 @@ vuint8mf8_t test_vadc_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadc_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_u8mf4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8mf4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4_tu( @@ -435,7 +435,7 @@ vuint8mf4_t test_vadc_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadc_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_u8mf4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u8mf4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2_tu( @@ -444,7 +444,7 @@ vuint8mf4_t test_vadc_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadc_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_u8mf2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8mf2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2_tu( @@ -453,7 +453,7 @@ vuint8mf2_t test_vadc_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadc_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_u8mf2_tu(maskedoff, op1, op2, carryin, vl); + 
return __riscv_vadc_vxm_u8mf2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u8m1_tu( @@ -462,7 +462,7 @@ vuint8mf2_t test_vadc_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadc_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_u8m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8m1_tu( @@ -471,7 +471,7 @@ vuint8m1_t test_vadc_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadc_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_u8m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u8m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u8m2_tu( @@ -480,7 +480,7 @@ vuint8m1_t test_vadc_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadc_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl) { - return vadc_vvm_u8m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8m2_tu( @@ -489,7 +489,7 @@ vuint8m2_t test_vadc_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadc_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl) { - return vadc_vxm_u8m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u8m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u8m4_tu( @@ -498,7 +498,7 @@ vuint8m2_t test_vadc_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vadc_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl) { - return vadc_vvm_u8m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8m4_tu( @@ -507,7 +507,7 @@ vuint8m4_t test_vadc_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadc_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl) { - return vadc_vxm_u8m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u8m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u8m8_tu( @@ -516,7 +516,7 @@ vuint8m4_t test_vadc_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadc_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl) { - return vadc_vvm_u8m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u8m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u8m8_tu( @@ -525,7 +525,7 @@ vuint8m8_t test_vadc_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadc_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl) { - return vadc_vxm_u8m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u8m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4_tu( @@ -534,7 +534,7 @@ vuint8m8_t test_vadc_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadc_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_u16mf4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u16mf4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: 
@test_vadc_vxm_u16mf4_tu( @@ -543,7 +543,7 @@ vuint16mf4_t test_vadc_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadc_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_u16mf4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u16mf4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2_tu( @@ -552,7 +552,7 @@ vuint16mf4_t test_vadc_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadc_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_u16mf2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u16mf2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2_tu( @@ -561,7 +561,7 @@ vuint16mf2_t test_vadc_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadc_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_u16mf2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u16mf2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u16m1_tu( @@ -570,7 +570,7 @@ vuint16mf2_t test_vadc_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadc_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_u16m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u16m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u16m1_tu( @@ -579,7 +579,7 @@ vuint16m1_t test_vadc_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadc_vxm_u16m1_tu(vuint16m1_t maskedoff, 
vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_u16m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u16m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u16m2_tu( @@ -588,7 +588,7 @@ vuint16m1_t test_vadc_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadc_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_u16m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u16m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u16m2_tu( @@ -597,7 +597,7 @@ vuint16m2_t test_vadc_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadc_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_u16m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u16m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u16m4_tu( @@ -606,7 +606,7 @@ vuint16m2_t test_vadc_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadc_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl) { - return vadc_vvm_u16m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u16m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u16m4_tu( @@ -615,7 +615,7 @@ vuint16m4_t test_vadc_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadc_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl) { - return vadc_vxm_u16m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u16m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u16m8_tu( @@ 
-624,7 +624,7 @@ vuint16m4_t test_vadc_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadc_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl) { - return vadc_vvm_u16m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u16m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u16m8_tu( @@ -633,7 +633,7 @@ vuint16m8_t test_vadc_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadc_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl) { - return vadc_vxm_u16m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u16m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2_tu( @@ -642,7 +642,7 @@ vuint16m8_t test_vadc_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadc_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_u32mf2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u32mf2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2_tu( @@ -651,7 +651,7 @@ vuint32mf2_t test_vadc_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) { - return vadc_vxm_u32mf2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u32mf2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u32m1_tu( @@ -660,7 +660,7 @@ vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadc_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, 
vbool32_t carryin, size_t vl) { - return vadc_vvm_u32m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u32m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u32m1_tu( @@ -669,7 +669,7 @@ vuint32m1_t test_vadc_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadc_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_u32m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u32m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u32m2_tu( @@ -678,7 +678,7 @@ vuint32m1_t test_vadc_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadc_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_u32m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u32m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u32m2_tu( @@ -687,7 +687,7 @@ vuint32m2_t test_vadc_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadc_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_u32m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u32m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u32m4_tu( @@ -696,7 +696,7 @@ vuint32m2_t test_vadc_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadc_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_u32m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u32m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u32m4_tu( @@ -705,7 +705,7 @@ 
vuint32m4_t test_vadc_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadc_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_u32m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u32m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u32m8_tu( @@ -714,7 +714,7 @@ vuint32m4_t test_vadc_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadc_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl) { - return vadc_vvm_u32m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u32m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u32m8_tu( @@ -723,7 +723,7 @@ vuint32m8_t test_vadc_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadc_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl) { - return vadc_vxm_u32m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u32m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u64m1_tu( @@ -732,7 +732,7 @@ vuint32m8_t test_vadc_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadc_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl) { - return vadc_vvm_u64m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u64m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u64m1_tu( @@ -741,7 +741,7 @@ vuint64m1_t test_vadc_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadc_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl) { - 
return vadc_vxm_u64m1_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u64m1_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u64m2_tu( @@ -750,7 +750,7 @@ vuint64m1_t test_vadc_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadc_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl) { - return vadc_vvm_u64m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u64m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u64m2_tu( @@ -759,7 +759,7 @@ vuint64m2_t test_vadc_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadc_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl) { - return vadc_vxm_u64m2_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u64m2_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u64m4_tu( @@ -768,7 +768,7 @@ vuint64m2_t test_vadc_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadc_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl) { - return vadc_vvm_u64m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u64m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u64m4_tu( @@ -777,7 +777,7 @@ vuint64m4_t test_vadc_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadc_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl) { - return vadc_vxm_u64m4_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u64m4_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vvm_u64m8_tu( @@ -786,7 +786,7 @@ vuint64m4_t 
test_vadc_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadc_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl) { - return vadc_vvm_u64m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vvm_u64m8_tu(maskedoff, op1, op2, carryin, vl); } // CHECK-RV64-LABEL: @test_vadc_vxm_u64m8_tu( @@ -795,6 +795,6 @@ vuint64m8_t test_vadc_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadc_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl) { - return vadc_vxm_u64m8_tu(maskedoff, op1, op2, carryin, vl); + return __riscv_vadc_vxm_u64m8_tu(maskedoff, op1, op2, carryin, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadd.c index 5b5d5012fa9f..f1e5edb51d28 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vadd.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vadd_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vadd_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vadd_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vadd_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vadd_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vadd_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, 
size_t vl) { - return vadd_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vadd_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vadd_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) 
{ - return vadd_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vadd_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vadd_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return 
vadd_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vadd_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vadd_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return 
vadd_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vadd_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vadd_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return 
vadd_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vadd_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vadd_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m1_tu(maskedoff, 
op1, op2, vl); + return __riscv_vadd_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vadd_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vadd_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vadd_vv_i64m8_tu(maskedoff, op1, op2, vl); + return 
__riscv_vadd_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_tu( @@ -408,7 +408,7 @@ vint64m8_t test_vadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vadd_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_tu( @@ -417,7 +417,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_tu( @@ -426,7 +426,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vadd_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_tu( @@ -435,7 +435,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return 
__riscv_vadd_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_tu( @@ -444,7 +444,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vadd_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_tu( @@ -453,7 +453,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_tu( @@ -462,7 +462,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vadd_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_tu( @@ -471,7 +471,7 @@ vuint8m1_t test_vadd_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_tu( @@ -480,7 +480,7 @@ vuint8m1_t test_vadd_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vadd_vv_u8m2_tu(maskedoff, op1, op2, vl); + return 
__riscv_vadd_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_tu( @@ -489,7 +489,7 @@ vuint8m2_t test_vadd_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_tu( @@ -498,7 +498,7 @@ vuint8m2_t test_vadd_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vadd_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_tu( @@ -507,7 +507,7 @@ vuint8m4_t test_vadd_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_tu( @@ -516,7 +516,7 @@ vuint8m4_t test_vadd_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vadd_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_tu( @@ -525,7 +525,7 @@ vuint8m8_t test_vadd_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m8_tu(maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_tu( @@ -534,7 +534,7 @@ vuint8m8_t test_vadd_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vadd_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_tu( @@ -543,7 +543,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_tu( @@ -552,7 +552,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vadd_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_tu( @@ -561,7 +561,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_tu( @@ -570,7 +570,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vadd_vv_u16m1_tu(maskedoff, op1, op2, vl); + return 
__riscv_vadd_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_tu( @@ -579,7 +579,7 @@ vuint16m1_t test_vadd_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_tu( @@ -588,7 +588,7 @@ vuint16m1_t test_vadd_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vadd_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_tu( @@ -597,7 +597,7 @@ vuint16m2_t test_vadd_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_tu( @@ -606,7 +606,7 @@ vuint16m2_t test_vadd_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vadd_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_tu( @@ -615,7 +615,7 @@ vuint16m4_t test_vadd_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m4_tu(maskedoff, op1, op2, vl); + return 
__riscv_vadd_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_tu( @@ -624,7 +624,7 @@ vuint16m4_t test_vadd_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vadd_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_tu( @@ -633,7 +633,7 @@ vuint16m8_t test_vadd_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_tu( @@ -642,7 +642,7 @@ vuint16m8_t test_vadd_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vadd_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_tu( @@ -651,7 +651,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_tu( @@ -660,7 +660,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vadd_vv_u32m1_tu(maskedoff, op1, op2, vl); 
+ return __riscv_vadd_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_tu( @@ -669,7 +669,7 @@ vuint32m1_t test_vadd_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_tu( @@ -678,7 +678,7 @@ vuint32m1_t test_vadd_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vadd_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_tu( @@ -687,7 +687,7 @@ vuint32m2_t test_vadd_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_tu( @@ -696,7 +696,7 @@ vuint32m2_t test_vadd_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vadd_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_tu( @@ -705,7 +705,7 @@ vuint32m4_t test_vadd_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m4_tu(maskedoff, op1, op2, vl); + return 
__riscv_vadd_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_tu( @@ -714,7 +714,7 @@ vuint32m4_t test_vadd_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vadd_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_tu( @@ -723,7 +723,7 @@ vuint32m8_t test_vadd_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_tu( @@ -732,7 +732,7 @@ vuint32m8_t test_vadd_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vadd_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_tu( @@ -741,7 +741,7 @@ vuint64m1_t test_vadd_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_tu( @@ -750,7 +750,7 @@ vuint64m1_t test_vadd_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vadd_vv_u64m2_tu(maskedoff, op1, op2, vl); + return 
__riscv_vadd_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_tu( @@ -759,7 +759,7 @@ vuint64m2_t test_vadd_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_tu( @@ -768,7 +768,7 @@ vuint64m2_t test_vadd_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vadd_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_tu( @@ -777,7 +777,7 @@ vuint64m4_t test_vadd_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_tu( @@ -786,7 +786,7 @@ vuint64m4_t test_vadd_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vadd_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_tu( @@ -795,7 +795,7 @@ vuint64m8_t test_vadd_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m8_tu(maskedoff, op1, op2, vl); + return 
__riscv_vadd_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_tum( @@ -804,7 +804,7 @@ vuint64m8_t test_vadd_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vadd_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_tum( @@ -813,7 +813,7 @@ vint8mf8_t test_vadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_tum( @@ -822,7 +822,7 @@ vint8mf8_t test_vadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vadd_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_tum( @@ -831,7 +831,7 @@ vint8mf4_t test_vadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_tum( @@ -840,7 +840,7 @@ vint8mf4_t test_vadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2_tum(vbool16_t mask, 
vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vadd_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_tum( @@ -849,7 +849,7 @@ vint8mf2_t test_vadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_tum( @@ -858,7 +858,7 @@ vint8mf2_t test_vadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vadd_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_tum( @@ -867,7 +867,7 @@ vint8m1_t test_vadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_tum( @@ -876,7 +876,7 @@ vint8m1_t test_vadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vadd_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_tum( @@ -885,7 +885,7 @@ vint8m2_t test_vadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t 
maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_tum( @@ -894,7 +894,7 @@ vint8m2_t test_vadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vadd_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_tum( @@ -903,7 +903,7 @@ vint8m4_t test_vadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_tum( @@ -912,7 +912,7 @@ vint8m4_t test_vadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vadd_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_tum( @@ -921,7 +921,7 @@ vint8m8_t test_vadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vadd_vv_i16mf4_tum( @@ -930,7 +930,7 @@ vint8m8_t test_vadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vadd_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_tum( @@ -939,7 +939,7 @@ vint16mf4_t test_vadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_tum( @@ -948,7 +948,7 @@ vint16mf4_t test_vadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_tum( @@ -957,7 +957,7 @@ vint16mf2_t test_vadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_tum( @@ -966,7 +966,7 @@ vint16mf2_t test_vadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t 
op2, size_t vl) { - return vadd_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_tum( @@ -975,7 +975,7 @@ vint16m1_t test_vadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_tum( @@ -984,7 +984,7 @@ vint16m1_t test_vadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vadd_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_tum( @@ -993,7 +993,7 @@ vint16m2_t test_vadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_tum( @@ -1002,7 +1002,7 @@ vint16m2_t test_vadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vadd_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_tum( @@ -1011,7 +1011,7 @@ vint16m4_t test_vadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_tum( @@ -1020,7 +1020,7 @@ vint16m4_t test_vadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vadd_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_tum( @@ -1029,7 +1029,7 @@ vint16m8_t test_vadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_tum( @@ -1038,7 +1038,7 @@ vint16m8_t test_vadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_tum( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_tum( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vadd_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_tum( @@ -1065,7 +1065,7 @@ vint32m1_t test_vadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_tum( @@ -1074,7 +1074,7 @@ vint32m1_t test_vadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vadd_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_tum( @@ -1083,7 +1083,7 @@ vint32m2_t test_vadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_tum( @@ -1092,7 +1092,7 @@ vint32m2_t test_vadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, 
vint32m4_t op2, size_t vl) { - return vadd_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_tum( @@ -1101,7 +1101,7 @@ vint32m4_t test_vadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_tum( @@ -1110,7 +1110,7 @@ vint32m4_t test_vadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vadd_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_tum( @@ -1119,7 +1119,7 @@ vint32m8_t test_vadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_tum( @@ -1128,7 +1128,7 @@ vint32m8_t test_vadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vadd_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_tum( @@ -1137,7 +1137,7 @@ vint64m1_t test_vadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t 
maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_tum( @@ -1146,7 +1146,7 @@ vint64m1_t test_vadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vadd_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_tum( @@ -1155,7 +1155,7 @@ vint64m2_t test_vadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_tum( @@ -1164,7 +1164,7 @@ vint64m2_t test_vadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vadd_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_tum( @@ -1173,7 +1173,7 @@ vint64m4_t test_vadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m4_tum(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_tum( @@ -1182,7 +1182,7 @@ vint64m4_t test_vadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vadd_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_tum( @@ -1191,7 +1191,7 @@ vint64m8_t test_vadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_tum( @@ -1200,7 +1200,7 @@ vint64m8_t test_vadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vadd_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_tum( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_tum( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, 
vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vadd_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_tum( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_tum( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vadd_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_tum( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_tum( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vadd_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_tum( @@ -1263,7 +1263,7 @@ vuint8m1_t 
test_vadd_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_tum( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vadd_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vadd_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_tum( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vadd_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_tum( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vadd_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vadd_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_tum( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vadd_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vadd_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_tum( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vadd_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vadd_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_tum( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vadd_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_tum( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vadd_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vadd_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_tum( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_tum( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vadd_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vadd_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_tum( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_tum( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vadd_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_tum( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vadd_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_tum( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vadd_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vadd_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vadd_vx_u16m2_tum( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vadd_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_tum( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vadd_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vadd_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_tum( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vadd_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_tum( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vadd_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vadd_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_tum( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vadd_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, 
size_t vl) { - return vadd_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_tum( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vadd_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vadd_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_tum( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_tum( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vadd_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_tum( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vadd_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_tum( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vadd_vx_u32m1_tum(vbool32_t 
mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vadd_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_tum( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vadd_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_tum( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vadd_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vadd_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_tum( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vadd_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_tum( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vadd_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vadd_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vadd_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_tum( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vadd_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_tum( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vadd_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vadd_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_tum( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vadd_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_tum( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vadd_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vadd_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_tum( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vadd_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vadd_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_tum( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vadd_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vadd_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_tum( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vadd_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_tum( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vadd_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vadd_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_tum( @@ -1587,7 +1587,7 @@ vuint64m8_t test_vadd_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vadd_vv_i8mf8_tumu( @@ -1596,7 +1596,7 @@ vuint64m8_t test_vadd_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vadd_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_tumu( @@ -1605,7 +1605,7 @@ vint8mf8_t test_vadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_tumu( @@ -1614,7 +1614,7 @@ vint8mf8_t test_vadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vadd_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_tumu( @@ -1623,7 +1623,7 @@ vint8mf4_t test_vadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_tumu( @@ -1632,7 +1632,7 @@ vint8mf4_t test_vadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, 
size_t vl) { - return vadd_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_tumu( @@ -1641,7 +1641,7 @@ vint8mf2_t test_vadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_tumu( @@ -1650,7 +1650,7 @@ vint8mf2_t test_vadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vadd_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_tumu( @@ -1659,7 +1659,7 @@ vint8m1_t test_vadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_tumu( @@ -1668,7 +1668,7 @@ vint8m1_t test_vadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vadd_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_tumu( @@ -1677,7 +1677,7 @@ vint8m2_t test_vadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_tumu( @@ -1686,7 +1686,7 @@ vint8m2_t test_vadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vadd_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_tumu( @@ -1695,7 +1695,7 @@ vint8m4_t test_vadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_tumu( @@ -1704,7 +1704,7 @@ vint8m4_t test_vadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vadd_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_tumu( @@ -1713,7 +1713,7 @@ vint8m8_t test_vadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vadd_vv_i16mf4_tumu( @@ -1722,7 +1722,7 @@ vint8m8_t test_vadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_tumu( @@ -1731,7 +1731,7 @@ vint16mf4_t test_vadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_tumu( @@ -1740,7 +1740,7 @@ vint16mf4_t test_vadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_tumu( @@ -1749,7 +1749,7 @@ vint16mf2_t test_vadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_tumu( @@ -1758,7 +1758,7 @@ vint16mf2_t test_vadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, 
vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_tumu( @@ -1767,7 +1767,7 @@ vint16m1_t test_vadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_tumu( @@ -1776,7 +1776,7 @@ vint16m1_t test_vadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_tumu( @@ -1785,7 +1785,7 @@ vint16m2_t test_vadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_tumu( @@ -1794,7 +1794,7 @@ vint16m2_t test_vadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_tumu( @@ -1803,7 +1803,7 @@ vint16m4_t 
test_vadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_tumu( @@ -1812,7 +1812,7 @@ vint16m4_t test_vadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_tumu( @@ -1821,7 +1821,7 @@ vint16m8_t test_vadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_tumu( @@ -1830,7 +1830,7 @@ vint16m8_t test_vadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_tumu( @@ -1839,7 +1839,7 @@ vint32mf2_t test_vadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32mf2_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_tumu( @@ -1848,7 +1848,7 @@ vint32mf2_t test_vadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_tumu( @@ -1857,7 +1857,7 @@ vint32m1_t test_vadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_tumu( @@ -1866,7 +1866,7 @@ vint32m1_t test_vadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_tumu( @@ -1875,7 +1875,7 @@ vint32m2_t test_vadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_tumu( @@ -1884,7 +1884,7 @@ vint32m2_t test_vadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_tumu( @@ -1893,7 +1893,7 @@ vint32m4_t test_vadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_tumu( @@ -1902,7 +1902,7 @@ vint32m4_t test_vadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_tumu( @@ -1911,7 +1911,7 @@ vint32m8_t test_vadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_tumu( @@ -1920,7 +1920,7 @@ vint32m8_t test_vadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vadd_vx_i64m1_tumu( @@ -1929,7 +1929,7 @@ vint64m1_t test_vadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_tumu( @@ -1938,7 +1938,7 @@ vint64m1_t test_vadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_tumu( @@ -1947,7 +1947,7 @@ vint64m2_t test_vadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_tumu( @@ -1956,7 +1956,7 @@ vint64m2_t test_vadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vadd_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_tumu( @@ -1965,7 +1965,7 @@ vint64m4_t test_vadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t 
op1, int64_t op2, size_t vl) { - return vadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_tumu( @@ -1974,7 +1974,7 @@ vint64m4_t test_vadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_tumu( @@ -1983,7 +1983,7 @@ vint64m8_t test_vadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_tumu( @@ -1992,7 +1992,7 @@ vint64m8_t test_vadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vadd_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_tumu( @@ -2001,7 +2001,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_tumu( @@ -2010,7 +2010,7 @@ vuint8mf8_t 
test_vadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vadd_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_tumu( @@ -2019,7 +2019,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_tumu( @@ -2028,7 +2028,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vadd_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_tumu( @@ -2037,7 +2037,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_tumu( @@ -2046,7 +2046,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vadd_vv_u8m1_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_tumu( @@ -2055,7 +2055,7 @@ vuint8m1_t test_vadd_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_tumu( @@ -2064,7 +2064,7 @@ vuint8m1_t test_vadd_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vadd_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_tumu( @@ -2073,7 +2073,7 @@ vuint8m2_t test_vadd_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_tumu( @@ -2082,7 +2082,7 @@ vuint8m2_t test_vadd_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vadd_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_tumu( @@ -2091,7 +2091,7 @@ vuint8m4_t test_vadd_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m4_t test_vadd_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_tumu( @@ -2100,7 +2100,7 @@ vuint8m4_t test_vadd_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vadd_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_tumu( @@ -2109,7 +2109,7 @@ vuint8m8_t test_vadd_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_tumu( @@ -2118,7 +2118,7 @@ vuint8m8_t test_vadd_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vadd_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_tumu( @@ -2127,7 +2127,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_tumu( @@ -2136,7 +2136,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vadd_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_tumu( @@ -2145,7 +2145,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_tumu( @@ -2154,7 +2154,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vadd_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_tumu( @@ -2163,7 +2163,7 @@ vuint16m1_t test_vadd_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_tumu( @@ -2172,7 +2172,7 @@ vuint16m1_t test_vadd_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2_tumu(vbool8_t mask, 
vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vadd_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_tumu( @@ -2181,7 +2181,7 @@ vuint16m2_t test_vadd_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_tumu( @@ -2190,7 +2190,7 @@ vuint16m2_t test_vadd_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vadd_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_tumu( @@ -2199,7 +2199,7 @@ vuint16m4_t test_vadd_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_tumu( @@ -2208,7 +2208,7 @@ vuint16m4_t test_vadd_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vadd_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_tumu( @@ -2217,7 
+2217,7 @@ vuint16m8_t test_vadd_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_tumu( @@ -2226,7 +2226,7 @@ vuint16m8_t test_vadd_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vadd_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_tumu( @@ -2235,7 +2235,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_tumu( @@ -2244,7 +2244,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vadd_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_tumu( @@ -2253,7 +2253,7 @@ vuint32m1_t test_vadd_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, 
size_t vl) { - return vadd_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_tumu( @@ -2262,7 +2262,7 @@ vuint32m1_t test_vadd_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vadd_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_tumu( @@ -2271,7 +2271,7 @@ vuint32m2_t test_vadd_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_tumu( @@ -2280,7 +2280,7 @@ vuint32m2_t test_vadd_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vadd_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_tumu( @@ -2289,7 +2289,7 @@ vuint32m4_t test_vadd_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_tumu( @@ -2298,7 +2298,7 @@ vuint32m4_t 
test_vadd_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vadd_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_tumu( @@ -2307,7 +2307,7 @@ vuint32m8_t test_vadd_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_tumu( @@ -2316,7 +2316,7 @@ vuint32m8_t test_vadd_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vadd_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_tumu( @@ -2325,7 +2325,7 @@ vuint64m1_t test_vadd_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_tumu( @@ -2334,7 +2334,7 @@ vuint64m1_t test_vadd_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return 
vadd_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_tumu( @@ -2343,7 +2343,7 @@ vuint64m2_t test_vadd_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_tumu( @@ -2352,7 +2352,7 @@ vuint64m2_t test_vadd_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vadd_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_tumu( @@ -2361,7 +2361,7 @@ vuint64m4_t test_vadd_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_tumu( @@ -2370,7 +2370,7 @@ vuint64m4_t test_vadd_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vadd_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_tumu( @@ -2379,7 +2379,7 @@ vuint64m8_t test_vadd_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t 
maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_mu( @@ -2388,7 +2388,7 @@ vuint64m8_t test_vadd_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vadd_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_mu( @@ -2397,7 +2397,7 @@ vint8mf8_t test_vadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_mu( @@ -2406,7 +2406,7 @@ vint8mf8_t test_vadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vadd_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_mu( @@ -2415,7 +2415,7 @@ vint8mf4_t test_vadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_mu( @@ -2424,7 +2424,7 @@ vint8mf4_t test_vadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vadd_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_mu( @@ -2433,7 +2433,7 @@ vint8mf2_t test_vadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_mu( @@ -2442,7 +2442,7 @@ vint8mf2_t test_vadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vadd_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_mu( @@ -2451,7 +2451,7 @@ vint8m1_t test_vadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_mu( @@ -2460,7 +2460,7 @@ vint8m1_t test_vadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return 
vadd_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_mu( @@ -2469,7 +2469,7 @@ vint8m2_t test_vadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_mu( @@ -2478,7 +2478,7 @@ vint8m2_t test_vadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vadd_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_mu( @@ -2487,7 +2487,7 @@ vint8m4_t test_vadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_mu( @@ -2496,7 +2496,7 @@ vint8m4_t test_vadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vadd_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_mu( @@ -2505,7 +2505,7 @@ vint8m8_t test_vadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8_mu(vbool1_t 
mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vadd_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_mu( @@ -2514,7 +2514,7 @@ vint8m8_t test_vadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_mu( @@ -2523,7 +2523,7 @@ vint16mf4_t test_vadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_mu( @@ -2532,7 +2532,7 @@ vint16mf4_t test_vadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_mu( @@ -2541,7 +2541,7 @@ vint16mf2_t test_vadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_mu( @@ -2550,7 +2550,7 @@ vint16mf2_t 
test_vadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vadd_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_mu( @@ -2559,7 +2559,7 @@ vint16m1_t test_vadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_mu( @@ -2568,7 +2568,7 @@ vint16m1_t test_vadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vadd_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_mu( @@ -2577,7 +2577,7 @@ vint16m2_t test_vadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_mu( @@ -2586,7 +2586,7 @@ vint16m2_t test_vadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vadd_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vadd_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_mu( @@ -2595,7 +2595,7 @@ vint16m4_t test_vadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_mu( @@ -2604,7 +2604,7 @@ vint16m4_t test_vadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_mu( @@ -2613,7 +2613,7 @@ vint16m8_t test_vadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vadd_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_mu( @@ -2622,7 +2622,7 @@ vint16m8_t test_vadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_mu( @@ -2631,7 +2631,7 @@ vint32mf2_t test_vadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2_mu(vbool64_t mask, 
vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_mu( @@ -2640,7 +2640,7 @@ vint32mf2_t test_vadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vadd_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_mu( @@ -2649,7 +2649,7 @@ vint32m1_t test_vadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_mu( @@ -2658,7 +2658,7 @@ vint32m1_t test_vadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vadd_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_mu( @@ -2667,7 +2667,7 @@ vint32m2_t test_vadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_mu( @@ -2676,7 +2676,7 @@ vint32m2_t 
test_vadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vadd_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_mu( @@ -2685,7 +2685,7 @@ vint32m4_t test_vadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_mu( @@ -2694,7 +2694,7 @@ vint32m4_t test_vadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_mu( @@ -2703,7 +2703,7 @@ vint32m8_t test_vadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vadd_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_mu( @@ -2712,7 +2712,7 @@ vint32m8_t test_vadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vadd_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vadd_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_mu( @@ -2721,7 +2721,7 @@ vint64m1_t test_vadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_mu( @@ -2730,7 +2730,7 @@ vint64m1_t test_vadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vadd_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_mu( @@ -2739,7 +2739,7 @@ vint64m2_t test_vadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_mu( @@ -2748,7 +2748,7 @@ vint64m2_t test_vadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vadd_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_mu( @@ -2757,7 +2757,7 @@ vint64m4_t test_vadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4_mu(vbool16_t mask, 
vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_mu( @@ -2766,7 +2766,7 @@ vint64m4_t test_vadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_mu( @@ -2775,7 +2775,7 @@ vint64m8_t test_vadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vadd_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_mu( @@ -2784,7 +2784,7 @@ vint64m8_t test_vadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vadd_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_mu( @@ -2793,7 +2793,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_mu( @@ -2802,7 +2802,7 @@ vuint8mf8_t 
test_vadd_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vadd_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_mu( @@ -2811,7 +2811,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_mu( @@ -2820,7 +2820,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vadd_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_mu( @@ -2829,7 +2829,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_mu( @@ -2838,7 +2838,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vadd_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vadd_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_mu( @@ -2847,7 +2847,7 @@ vuint8m1_t test_vadd_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_mu( @@ -2856,7 +2856,7 @@ vuint8m1_t test_vadd_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vadd_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_mu( @@ -2865,7 +2865,7 @@ vuint8m2_t test_vadd_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_mu( @@ -2874,7 +2874,7 @@ vuint8m2_t test_vadd_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vadd_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_mu( @@ -2883,7 +2883,7 @@ vuint8m4_t test_vadd_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, 
uint8_t op2, size_t vl) { - return vadd_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_mu( @@ -2892,7 +2892,7 @@ vuint8m4_t test_vadd_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vadd_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_mu( @@ -2901,7 +2901,7 @@ vuint8m8_t test_vadd_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vadd_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_mu( @@ -2910,7 +2910,7 @@ vuint8m8_t test_vadd_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vadd_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_mu( @@ -2919,7 +2919,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_mu( @@ -2928,7 +2928,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t 
maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vadd_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_mu( @@ -2937,7 +2937,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_mu( @@ -2946,7 +2946,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vadd_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_mu( @@ -2955,7 +2955,7 @@ vuint16m1_t test_vadd_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_mu( @@ -2964,7 +2964,7 @@ vuint16m1_t test_vadd_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vadd_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m2_mu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_mu( @@ -2973,7 +2973,7 @@ vuint16m2_t test_vadd_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_mu( @@ -2982,7 +2982,7 @@ vuint16m2_t test_vadd_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vadd_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_mu( @@ -2991,7 +2991,7 @@ vuint16m4_t test_vadd_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_mu( @@ -3000,7 +3000,7 @@ vuint16m4_t test_vadd_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vadd_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_mu( @@ -3009,7 +3009,7 @@ vuint16m8_t test_vadd_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, 
vuint16m8_t op1, uint16_t op2, size_t vl) { - return vadd_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_mu( @@ -3018,7 +3018,7 @@ vuint16m8_t test_vadd_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vadd_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_mu( @@ -3027,7 +3027,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_mu( @@ -3036,7 +3036,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vadd_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_mu( @@ -3045,7 +3045,7 @@ vuint32m1_t test_vadd_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_mu( @@ -3054,7 +3054,7 @@ vuint32m1_t 
test_vadd_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vadd_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_mu( @@ -3063,7 +3063,7 @@ vuint32m2_t test_vadd_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_mu( @@ -3072,7 +3072,7 @@ vuint32m2_t test_vadd_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vadd_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_mu( @@ -3081,7 +3081,7 @@ vuint32m4_t test_vadd_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_mu( @@ -3090,7 +3090,7 @@ vuint32m4_t test_vadd_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vadd_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vadd_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_mu( @@ -3099,7 +3099,7 @@ vuint32m8_t test_vadd_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vadd_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_mu( @@ -3108,7 +3108,7 @@ vuint32m8_t test_vadd_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vadd_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_mu( @@ -3117,7 +3117,7 @@ vuint64m1_t test_vadd_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_mu( @@ -3126,7 +3126,7 @@ vuint64m1_t test_vadd_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vadd_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_mu( @@ -3135,7 +3135,7 @@ vuint64m2_t test_vadd_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vadd_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_mu( @@ -3144,7 +3144,7 @@ vuint64m2_t test_vadd_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vadd_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_mu( @@ -3153,7 +3153,7 @@ vuint64m4_t test_vadd_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_mu( @@ -3162,7 +3162,7 @@ vuint64m4_t test_vadd_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vadd_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_mu( @@ -3171,6 +3171,6 @@ vuint64m8_t test_vadd_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vadd_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vadd_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vand.c index 6aa38f5601e6..6f71f5772955 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vand.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vand_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vand_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vand_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vand_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vand_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vand_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8mf2_t test_vand_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vand_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vand_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vand_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vand_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vand_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vand_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vand_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vand_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2_tu(vint8m2_t 
maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vand_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vand_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vand_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vand_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vand_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vand_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vand_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - 
return vand_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vand_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vand_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vand_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vand_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vand_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vand_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vand_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - 
return vand_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vand_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vand_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vand_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vand_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vand_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vand_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vand_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return 
vand_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vand_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vand_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vand_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vand_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vand_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vand_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vand_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return 
vand_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vand_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vand_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vand_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vand_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vand_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vand_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vand_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return 
vand_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vand_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vand_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vand_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vand_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vand_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vand_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vand_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m2_tu(maskedoff, 
op1, op2, vl); + return __riscv_vand_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vand_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vand_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vand_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vand_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vand_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vand_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_tu( @@ -408,7 +408,7 @@ vint64m8_t test_vand_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vand_vv_u8mf8_tu(maskedoff, op1, op2, vl); + 
return __riscv_vand_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_tu( @@ -417,7 +417,7 @@ vuint8mf8_t test_vand_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_tu( @@ -426,7 +426,7 @@ vuint8mf8_t test_vand_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vand_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_tu( @@ -435,7 +435,7 @@ vuint8mf4_t test_vand_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_tu( @@ -444,7 +444,7 @@ vuint8mf4_t test_vand_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vand_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_tu( @@ -453,7 +453,7 @@ vuint8mf2_t test_vand_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return 
__riscv_vand_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m1_tu( @@ -462,7 +462,7 @@ vuint8mf2_t test_vand_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vand_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m1_tu( @@ -471,7 +471,7 @@ vuint8m1_t test_vand_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m2_tu( @@ -480,7 +480,7 @@ vuint8m1_t test_vand_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vand_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m2_tu( @@ -489,7 +489,7 @@ vuint8m2_t test_vand_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m4_tu( @@ -498,7 +498,7 @@ vuint8m2_t test_vand_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vand_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m4_tu(maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m4_tu( @@ -507,7 +507,7 @@ vuint8m4_t test_vand_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m8_tu( @@ -516,7 +516,7 @@ vuint8m4_t test_vand_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vand_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m8_tu( @@ -525,7 +525,7 @@ vuint8m8_t test_vand_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_tu( @@ -534,7 +534,7 @@ vuint8m8_t test_vand_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vand_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_tu( @@ -543,7 +543,7 @@ vuint16mf4_t test_vand_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vand_vv_u16mf2_tu( @@ -552,7 +552,7 @@ vuint16mf4_t test_vand_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vand_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_tu( @@ -561,7 +561,7 @@ vuint16mf2_t test_vand_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m1_tu( @@ -570,7 +570,7 @@ vuint16mf2_t test_vand_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vand_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m1_tu( @@ -579,7 +579,7 @@ vuint16m1_t test_vand_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m2_tu( @@ -588,7 +588,7 @@ vuint16m1_t test_vand_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vand_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m2_tu(maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m2_tu( @@ -597,7 +597,7 @@ vuint16m2_t test_vand_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m4_tu( @@ -606,7 +606,7 @@ vuint16m2_t test_vand_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vand_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m4_tu( @@ -615,7 +615,7 @@ vuint16m4_t test_vand_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m8_tu( @@ -624,7 +624,7 @@ vuint16m4_t test_vand_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vand_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m8_tu( @@ -633,7 +633,7 @@ vuint16m8_t test_vand_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m8_tu(maskedoff, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tu( @@ -642,7 +642,7 @@ vuint16m8_t test_vand_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vand_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tu( @@ -651,7 +651,7 @@ vuint32mf2_t test_vand_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m1_tu( @@ -660,7 +660,7 @@ vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vand_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m1_tu( @@ -669,7 +669,7 @@ vuint32m1_t test_vand_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m2_tu( @@ -678,7 +678,7 @@ vuint32m1_t test_vand_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vand_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m2_tu(maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m2_tu( @@ -687,7 +687,7 @@ vuint32m2_t test_vand_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m4_tu( @@ -696,7 +696,7 @@ vuint32m2_t test_vand_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vand_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m4_tu( @@ -705,7 +705,7 @@ vuint32m4_t test_vand_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m8_tu( @@ -714,7 +714,7 @@ vuint32m4_t test_vand_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vand_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m8_tu( @@ -723,7 +723,7 @@ vuint32m8_t test_vand_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m8_tu(maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m1_tu( @@ -732,7 +732,7 @@ vuint32m8_t test_vand_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vand_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m1_tu( @@ -741,7 +741,7 @@ vuint64m1_t test_vand_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m2_tu( @@ -750,7 +750,7 @@ vuint64m1_t test_vand_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vand_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m2_tu( @@ -759,7 +759,7 @@ vuint64m2_t test_vand_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m4_tu( @@ -768,7 +768,7 @@ vuint64m2_t test_vand_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vand_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m4_tu(maskedoff, op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vand_vx_u64m4_tu( @@ -777,7 +777,7 @@ vuint64m4_t test_vand_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m8_tu( @@ -786,7 +786,7 @@ vuint64m4_t test_vand_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vand_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m8_tu( @@ -795,7 +795,7 @@ vuint64m8_t test_vand_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf8_tum( @@ -804,7 +804,7 @@ vuint64m8_t test_vand_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vand_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_tum( @@ -813,7 +813,7 @@ vint8mf8_t test_vand_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vand_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_tum( @@ -822,7 +822,7 @@ vint8mf8_t test_vand_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vand_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_tum( @@ -831,7 +831,7 @@ vint8mf4_t test_vand_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_tum( @@ -840,7 +840,7 @@ vint8mf4_t test_vand_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vand_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_tum( @@ -849,7 +849,7 @@ vint8mf2_t test_vand_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m1_tum( @@ -858,7 +858,7 @@ vint8mf2_t test_vand_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1_tum(vbool8_t mask, 
vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vand_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m1_tum( @@ -867,7 +867,7 @@ vint8m1_t test_vand_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m2_tum( @@ -876,7 +876,7 @@ vint8m1_t test_vand_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vand_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m2_tum( @@ -885,7 +885,7 @@ vint8m2_t test_vand_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m4_tum( @@ -894,7 +894,7 @@ vint8m2_t test_vand_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vand_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m4_tum( @@ -903,7 +903,7 @@ vint8m4_t test_vand_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, 
vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m8_tum( @@ -912,7 +912,7 @@ vint8m4_t test_vand_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vand_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m8_tum( @@ -921,7 +921,7 @@ vint8m8_t test_vand_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_tum( @@ -930,7 +930,7 @@ vint8m8_t test_vand_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vand_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_tum( @@ -939,7 +939,7 @@ vint16mf4_t test_vand_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vand_vv_i16mf2_tum( @@ -948,7 +948,7 @@ vint16mf4_t test_vand_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vand_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_tum( @@ -957,7 +957,7 @@ vint16mf2_t test_vand_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m1_tum( @@ -966,7 +966,7 @@ vint16mf2_t test_vand_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vand_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m1_tum( @@ -975,7 +975,7 @@ vint16m1_t test_vand_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m2_tum( @@ -984,7 +984,7 @@ vint16m1_t test_vand_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t 
op2, size_t vl) { - return vand_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m2_tum( @@ -993,7 +993,7 @@ vint16m2_t test_vand_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m4_tum( @@ -1002,7 +1002,7 @@ vint16m2_t test_vand_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vand_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m4_tum( @@ -1011,7 +1011,7 @@ vint16m4_t test_vand_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m8_tum( @@ -1020,7 +1020,7 @@ vint16m4_t test_vand_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vand_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m8_tum( @@ -1029,7 +1029,7 @@ vint16m8_t test_vand_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, 
vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tum( @@ -1038,7 +1038,7 @@ vint16m8_t test_vand_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vand_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tum( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vand_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m1_tum( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vand_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vand_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m1_tum( @@ -1065,7 +1065,7 @@ vint32m1_t test_vand_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m1_tum(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m2_tum( @@ -1074,7 +1074,7 @@ vint32m1_t test_vand_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vand_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m2_tum( @@ -1083,7 +1083,7 @@ vint32m2_t test_vand_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m4_tum( @@ -1092,7 +1092,7 @@ vint32m2_t test_vand_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vand_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m4_tum( @@ -1101,7 +1101,7 @@ vint32m4_t test_vand_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m8_tum( @@ -1110,7 +1110,7 @@ vint32m4_t test_vand_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t 
op1, vint32m8_t op2, size_t vl) { - return vand_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m8_tum( @@ -1119,7 +1119,7 @@ vint32m8_t test_vand_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m1_tum( @@ -1128,7 +1128,7 @@ vint32m8_t test_vand_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vand_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m1_tum( @@ -1137,7 +1137,7 @@ vint64m1_t test_vand_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m2_tum( @@ -1146,7 +1146,7 @@ vint64m1_t test_vand_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vand_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m2_tum( @@ -1155,7 +1155,7 @@ vint64m2_t test_vand_vv_i64m2_tum(vbool32_t mask, 
vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m4_tum( @@ -1164,7 +1164,7 @@ vint64m2_t test_vand_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vand_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m4_tum( @@ -1173,7 +1173,7 @@ vint64m4_t test_vand_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m8_tum( @@ -1182,7 +1182,7 @@ vint64m4_t test_vand_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vand_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m8_tum( @@ -1191,7 +1191,7 @@ vint64m8_t test_vand_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m8_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_tum( @@ -1200,7 +1200,7 @@ vint64m8_t test_vand_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vand_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_tum( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vand_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_tum( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vand_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vand_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_tum( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vand_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_tum( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vand_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2_tum(vbool16_t mask, 
vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vand_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_tum( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vand_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m1_tum( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vand_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vand_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m1_tum( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vand_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m2_tum( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vand_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vand_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m2_tum( @@ -1281,7 +1281,7 @@ vuint8m2_t 
test_vand_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m4_tum( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vand_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vand_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m4_tum( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vand_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m8_tum( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vand_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vand_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m8_tum( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vand_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vand_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_tum( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vand_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vand_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_tum( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vand_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_tum( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vand_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vand_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_tum( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vand_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m1_tum( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vand_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m1_t test_vand_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vand_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m1_tum( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vand_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m2_tum( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vand_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vand_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m2_tum( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vand_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m4_tum( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vand_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vand_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vand_vx_u16m4_tum( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vand_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m8_tum( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vand_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vand_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m8_tum( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vand_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tum( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vand_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vand_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tum( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vand_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, 
vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m1_tum( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vand_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vand_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m1_tum( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vand_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m2_tum( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vand_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vand_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m2_tum( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vand_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m4_tum( @@ -1488,7 +1488,7 @@ vuint32m2_t 
test_vand_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vand_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m4_tum( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vand_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m8_tum( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vand_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vand_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m8_tum( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vand_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m1_tum( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vand_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vand_vv_u64m1_tum(mask, maskedoff, 
op1, op2, vl); + return __riscv_vand_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m1_tum( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vand_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m2_tum( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vand_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vand_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m2_tum( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vand_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m4_tum( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vand_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vand_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m4_tum( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vand_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m4_t test_vand_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m8_tum( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vand_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vand_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m8_tum( @@ -1587,7 +1587,7 @@ vuint64m8_t test_vand_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf8_tumu( @@ -1596,7 +1596,7 @@ vuint64m8_t test_vand_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vand_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_tumu( @@ -1605,7 +1605,7 @@ vint8mf8_t test_vand_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vand_vv_i8mf4_tumu( @@ -1614,7 +1614,7 @@ vint8mf8_t test_vand_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vand_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_tumu( @@ -1623,7 +1623,7 @@ vint8mf4_t test_vand_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_tumu( @@ -1632,7 +1632,7 @@ vint8mf4_t test_vand_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vand_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_tumu( @@ -1641,7 +1641,7 @@ vint8mf2_t test_vand_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m1_tumu( @@ -1650,7 +1650,7 @@ vint8mf2_t test_vand_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t 
vl) { - return vand_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m1_tumu( @@ -1659,7 +1659,7 @@ vint8m1_t test_vand_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m2_tumu( @@ -1668,7 +1668,7 @@ vint8m1_t test_vand_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vand_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m2_tumu( @@ -1677,7 +1677,7 @@ vint8m2_t test_vand_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m4_tumu( @@ -1686,7 +1686,7 @@ vint8m2_t test_vand_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vand_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m4_tumu( @@ -1695,7 +1695,7 @@ vint8m4_t test_vand_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m8_tumu( @@ -1704,7 +1704,7 @@ vint8m4_t test_vand_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vand_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m8_tumu( @@ -1713,7 +1713,7 @@ vint8m8_t test_vand_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_tumu( @@ -1722,7 +1722,7 @@ vint8m8_t test_vand_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vand_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_tumu( @@ -1731,7 +1731,7 @@ vint16mf4_t test_vand_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vand_vv_i16mf2_tumu( @@ -1740,7 +1740,7 @@ vint16mf4_t test_vand_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vand_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_tumu( @@ -1749,7 +1749,7 @@ vint16mf2_t test_vand_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m1_tumu( @@ -1758,7 +1758,7 @@ vint16mf2_t test_vand_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vand_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m1_tumu( @@ -1767,7 +1767,7 @@ vint16m1_t test_vand_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m2_tumu( @@ -1776,7 +1776,7 @@ vint16m1_t test_vand_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, 
vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vand_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m2_tumu( @@ -1785,7 +1785,7 @@ vint16m2_t test_vand_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m4_tumu( @@ -1794,7 +1794,7 @@ vint16m2_t test_vand_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vand_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m4_tumu( @@ -1803,7 +1803,7 @@ vint16m4_t test_vand_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m8_tumu( @@ -1812,7 +1812,7 @@ vint16m4_t test_vand_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vand_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m8_tumu( @@ -1821,7 +1821,7 @@ vint16m8_t 
test_vand_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_tumu( @@ -1830,7 +1830,7 @@ vint16m8_t test_vand_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vand_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_tumu( @@ -1839,7 +1839,7 @@ vint32mf2_t test_vand_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m1_tumu( @@ -1848,7 +1848,7 @@ vint32mf2_t test_vand_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vand_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m1_tumu( @@ -1857,7 +1857,7 @@ vint32m1_t test_vand_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m1_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m2_tumu( @@ -1866,7 +1866,7 @@ vint32m1_t test_vand_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vand_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m2_tumu( @@ -1875,7 +1875,7 @@ vint32m2_t test_vand_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m4_tumu( @@ -1884,7 +1884,7 @@ vint32m2_t test_vand_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vand_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m4_tumu( @@ -1893,7 +1893,7 @@ vint32m4_t test_vand_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m8_tumu( @@ -1902,7 +1902,7 @@ vint32m4_t test_vand_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m8_t test_vand_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vand_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m8_tumu( @@ -1911,7 +1911,7 @@ vint32m8_t test_vand_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m1_tumu( @@ -1920,7 +1920,7 @@ vint32m8_t test_vand_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vand_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m1_tumu( @@ -1929,7 +1929,7 @@ vint64m1_t test_vand_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m2_tumu( @@ -1938,7 +1938,7 @@ vint64m1_t test_vand_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vand_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vand_vx_i64m2_tumu( @@ -1947,7 +1947,7 @@ vint64m2_t test_vand_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m4_tumu( @@ -1956,7 +1956,7 @@ vint64m2_t test_vand_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vand_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m4_tumu( @@ -1965,7 +1965,7 @@ vint64m4_t test_vand_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m8_tumu( @@ -1974,7 +1974,7 @@ vint64m4_t test_vand_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vand_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m8_tumu( @@ -1983,7 +1983,7 @@ vint64m8_t test_vand_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, 
int64_t op2, size_t vl) { - return vand_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_tumu( @@ -1992,7 +1992,7 @@ vint64m8_t test_vand_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vand_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_tumu( @@ -2001,7 +2001,7 @@ vuint8mf8_t test_vand_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_tumu( @@ -2010,7 +2010,7 @@ vuint8mf8_t test_vand_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vand_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_tumu( @@ -2019,7 +2019,7 @@ vuint8mf4_t test_vand_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_tumu( @@ -2028,7 +2028,7 @@ vuint8mf4_t 
test_vand_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vand_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_tumu( @@ -2037,7 +2037,7 @@ vuint8mf2_t test_vand_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m1_tumu( @@ -2046,7 +2046,7 @@ vuint8mf2_t test_vand_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vand_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m1_tumu( @@ -2055,7 +2055,7 @@ vuint8m1_t test_vand_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m2_tumu( @@ -2064,7 +2064,7 @@ vuint8m1_t test_vand_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vand_vv_u8m2_tumu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vand_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m2_tumu( @@ -2073,7 +2073,7 @@ vuint8m2_t test_vand_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m4_tumu( @@ -2082,7 +2082,7 @@ vuint8m2_t test_vand_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vand_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m4_tumu( @@ -2091,7 +2091,7 @@ vuint8m4_t test_vand_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m8_tumu( @@ -2100,7 +2100,7 @@ vuint8m4_t test_vand_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vand_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m8_tumu( @@ -2109,7 +2109,7 @@ vuint8m8_t test_vand_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vand_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_tumu( @@ -2118,7 +2118,7 @@ vuint8m8_t test_vand_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vand_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_tumu( @@ -2127,7 +2127,7 @@ vuint16mf4_t test_vand_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_tumu( @@ -2136,7 +2136,7 @@ vuint16mf4_t test_vand_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vand_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_tumu( @@ -2145,7 +2145,7 @@ vuint16mf2_t test_vand_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16mf2_tumu(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m1_tumu( @@ -2154,7 +2154,7 @@ vuint16mf2_t test_vand_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vand_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m1_tumu( @@ -2163,7 +2163,7 @@ vuint16m1_t test_vand_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m2_tumu( @@ -2172,7 +2172,7 @@ vuint16m1_t test_vand_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vand_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m2_tumu( @@ -2181,7 +2181,7 @@ vuint16m2_t test_vand_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m4_tumu( @@ -2190,7 +2190,7 @@ vuint16m2_t test_vand_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t 
maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vand_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m4_tumu( @@ -2199,7 +2199,7 @@ vuint16m4_t test_vand_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m8_tumu( @@ -2208,7 +2208,7 @@ vuint16m4_t test_vand_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vand_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m8_tumu( @@ -2217,7 +2217,7 @@ vuint16m8_t test_vand_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_tumu( @@ -2226,7 +2226,7 @@ vuint16m8_t test_vand_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vand_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_tumu( @@ -2235,7 
+2235,7 @@ vuint32mf2_t test_vand_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m1_tumu( @@ -2244,7 +2244,7 @@ vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vand_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m1_tumu( @@ -2253,7 +2253,7 @@ vuint32m1_t test_vand_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m2_tumu( @@ -2262,7 +2262,7 @@ vuint32m1_t test_vand_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vand_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m2_tumu( @@ -2271,7 +2271,7 @@ vuint32m2_t test_vand_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { 
- return vand_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m4_tumu( @@ -2280,7 +2280,7 @@ vuint32m2_t test_vand_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vand_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m4_tumu( @@ -2289,7 +2289,7 @@ vuint32m4_t test_vand_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m8_tumu( @@ -2298,7 +2298,7 @@ vuint32m4_t test_vand_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vand_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m8_tumu( @@ -2307,7 +2307,7 @@ vuint32m8_t test_vand_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m1_tumu( @@ -2316,7 +2316,7 @@ vuint32m8_t test_vand_vx_u32m8_tumu(vbool4_t mask, 
vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vand_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m1_tumu( @@ -2325,7 +2325,7 @@ vuint64m1_t test_vand_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m2_tumu( @@ -2334,7 +2334,7 @@ vuint64m1_t test_vand_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vand_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m2_tumu( @@ -2343,7 +2343,7 @@ vuint64m2_t test_vand_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m4_tumu( @@ -2352,7 +2352,7 @@ vuint64m2_t test_vand_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vand_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vand_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m4_tumu( @@ -2361,7 +2361,7 @@ vuint64m4_t test_vand_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m8_tumu( @@ -2370,7 +2370,7 @@ vuint64m4_t test_vand_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vand_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m8_tumu( @@ -2379,7 +2379,7 @@ vuint64m8_t test_vand_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf8_mu( @@ -2388,7 +2388,7 @@ vuint64m8_t test_vand_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vand_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_mu( @@ -2397,7 +2397,7 @@ vint8mf8_t test_vand_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vand_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_mu( @@ -2406,7 +2406,7 @@ vint8mf8_t test_vand_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vand_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_mu( @@ -2415,7 +2415,7 @@ vint8mf4_t test_vand_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_mu( @@ -2424,7 +2424,7 @@ vint8mf4_t test_vand_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vand_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_mu( @@ -2433,7 +2433,7 @@ vint8mf2_t test_vand_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m1_mu( @@ -2442,7 +2442,7 @@ vint8mf2_t 
test_vand_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vand_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m1_mu( @@ -2451,7 +2451,7 @@ vint8m1_t test_vand_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m2_mu( @@ -2460,7 +2460,7 @@ vint8m1_t test_vand_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vand_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m2_mu( @@ -2469,7 +2469,7 @@ vint8m2_t test_vand_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m4_mu( @@ -2478,7 +2478,7 @@ vint8m2_t test_vand_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vand_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vand_vx_i8m4_mu( @@ -2487,7 +2487,7 @@ vint8m4_t test_vand_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m8_mu( @@ -2496,7 +2496,7 @@ vint8m4_t test_vand_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vand_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m8_mu( @@ -2505,7 +2505,7 @@ vint8m8_t test_vand_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vand_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_mu( @@ -2514,7 +2514,7 @@ vint8m8_t test_vand_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vand_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_mu( @@ -2523,7 +2523,7 @@ vint16mf4_t test_vand_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return 
vand_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf2_mu( @@ -2532,7 +2532,7 @@ vint16mf4_t test_vand_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vand_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_mu( @@ -2541,7 +2541,7 @@ vint16mf2_t test_vand_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m1_mu( @@ -2550,7 +2550,7 @@ vint16mf2_t test_vand_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vand_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m1_mu( @@ -2559,7 +2559,7 @@ vint16m1_t test_vand_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m2_mu( @@ -2568,7 +2568,7 @@ vint16m1_t test_vand_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vand_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m2_mu( @@ -2577,7 +2577,7 @@ vint16m2_t test_vand_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m4_mu( @@ -2586,7 +2586,7 @@ vint16m2_t test_vand_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vand_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m4_mu( @@ -2595,7 +2595,7 @@ vint16m4_t test_vand_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m8_mu( @@ -2604,7 +2604,7 @@ vint16m4_t test_vand_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vand_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m8_mu( @@ 
-2613,7 +2613,7 @@ vint16m8_t test_vand_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vand_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_mu( @@ -2622,7 +2622,7 @@ vint16m8_t test_vand_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vand_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_mu( @@ -2631,7 +2631,7 @@ vint32mf2_t test_vand_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m1_mu( @@ -2640,7 +2640,7 @@ vint32mf2_t test_vand_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vand_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m1_mu( @@ -2649,7 +2649,7 @@ vint32m1_t test_vand_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m1_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m2_mu( @@ -2658,7 +2658,7 @@ vint32m1_t test_vand_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vand_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m2_mu( @@ -2667,7 +2667,7 @@ vint32m2_t test_vand_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m4_mu( @@ -2676,7 +2676,7 @@ vint32m2_t test_vand_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vand_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m4_mu( @@ -2685,7 +2685,7 @@ vint32m4_t test_vand_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m8_mu( @@ -2694,7 +2694,7 @@ vint32m4_t test_vand_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vand_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vand_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m8_mu( @@ -2703,7 +2703,7 @@ vint32m8_t test_vand_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vand_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m1_mu( @@ -2712,7 +2712,7 @@ vint32m8_t test_vand_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vand_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m1_mu( @@ -2721,7 +2721,7 @@ vint64m1_t test_vand_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m2_mu( @@ -2730,7 +2730,7 @@ vint64m1_t test_vand_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vand_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m2_mu( @@ -2739,7 +2739,7 @@ 
vint64m2_t test_vand_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m4_mu( @@ -2748,7 +2748,7 @@ vint64m2_t test_vand_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vand_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m4_mu( @@ -2757,7 +2757,7 @@ vint64m4_t test_vand_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i64m8_mu( @@ -2766,7 +2766,7 @@ vint64m4_t test_vand_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vand_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i64m8_mu( @@ -2775,7 +2775,7 @@ vint64m8_t test_vand_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vand_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vand_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_mu( @@ -2784,7 +2784,7 @@ vint64m8_t test_vand_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vand_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_mu( @@ -2793,7 +2793,7 @@ vuint8mf8_t test_vand_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_mu( @@ -2802,7 +2802,7 @@ vuint8mf8_t test_vand_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vand_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_mu( @@ -2811,7 +2811,7 @@ vuint8mf4_t test_vand_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_mu( @@ -2820,7 +2820,7 @@ vuint8mf4_t test_vand_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2_mu(vbool16_t 
mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vand_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_mu( @@ -2829,7 +2829,7 @@ vuint8mf2_t test_vand_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m1_mu( @@ -2838,7 +2838,7 @@ vuint8mf2_t test_vand_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vand_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m1_mu( @@ -2847,7 +2847,7 @@ vuint8m1_t test_vand_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m2_mu( @@ -2856,7 +2856,7 @@ vuint8m1_t test_vand_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vand_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m2_mu( @@ -2865,7 +2865,7 @@ vuint8m2_t test_vand_vv_u8m2_mu(vbool4_t mask, 
vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m4_mu( @@ -2874,7 +2874,7 @@ vuint8m2_t test_vand_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vand_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m4_mu( @@ -2883,7 +2883,7 @@ vuint8m4_t test_vand_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u8m8_mu( @@ -2892,7 +2892,7 @@ vuint8m4_t test_vand_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vand_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u8m8_mu( @@ -2901,7 +2901,7 @@ vuint8m8_t test_vand_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vand_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vand_vv_u16mf4_mu( @@ -2910,7 +2910,7 @@ vuint8m8_t test_vand_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vand_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_mu( @@ -2919,7 +2919,7 @@ vuint16mf4_t test_vand_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_mu( @@ -2928,7 +2928,7 @@ vuint16mf4_t test_vand_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vand_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_mu( @@ -2937,7 +2937,7 @@ vuint16mf2_t test_vand_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m1_mu( @@ -2946,7 +2946,7 @@ vuint16mf2_t test_vand_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, 
vuint16m1_t op2, size_t vl) { - return vand_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m1_mu( @@ -2955,7 +2955,7 @@ vuint16m1_t test_vand_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m2_mu( @@ -2964,7 +2964,7 @@ vuint16m1_t test_vand_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vand_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m2_mu( @@ -2973,7 +2973,7 @@ vuint16m2_t test_vand_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m4_mu( @@ -2982,7 +2982,7 @@ vuint16m2_t test_vand_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vand_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m4_mu( @@ -2991,7 +2991,7 @@ vuint16m4_t test_vand_vv_u16m4_mu(vbool4_t mask, vuint16m4_t 
maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u16m8_mu( @@ -3000,7 +3000,7 @@ vuint16m4_t test_vand_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vand_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u16m8_mu( @@ -3009,7 +3009,7 @@ vuint16m8_t test_vand_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vand_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_mu( @@ -3018,7 +3018,7 @@ vuint16m8_t test_vand_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vand_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_mu( @@ -3027,7 +3027,7 @@ vuint32mf2_t test_vand_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32mf2_mu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m1_mu( @@ -3036,7 +3036,7 @@ vuint32mf2_t test_vand_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vand_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m1_mu( @@ -3045,7 +3045,7 @@ vuint32m1_t test_vand_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m2_mu( @@ -3054,7 +3054,7 @@ vuint32m1_t test_vand_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vand_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m2_mu( @@ -3063,7 +3063,7 @@ vuint32m2_t test_vand_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m4_mu( @@ -3072,7 +3072,7 @@ vuint32m2_t test_vand_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, 
vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vand_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m4_mu( @@ -3081,7 +3081,7 @@ vuint32m4_t test_vand_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u32m8_mu( @@ -3090,7 +3090,7 @@ vuint32m4_t test_vand_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vand_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u32m8_mu( @@ -3099,7 +3099,7 @@ vuint32m8_t test_vand_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vand_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m1_mu( @@ -3108,7 +3108,7 @@ vuint32m8_t test_vand_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vand_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m1_mu( @@ -3117,7 +3117,7 @@ vuint64m1_t test_vand_vv_u64m1_mu(vbool64_t 
mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m2_mu( @@ -3126,7 +3126,7 @@ vuint64m1_t test_vand_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vand_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m2_mu( @@ -3135,7 +3135,7 @@ vuint64m2_t test_vand_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m4_mu( @@ -3144,7 +3144,7 @@ vuint64m2_t test_vand_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vand_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m4_mu( @@ -3153,7 +3153,7 @@ vuint64m4_t test_vand_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vand_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_u64m8_mu( @@ -3162,7 +3162,7 @@ vuint64m4_t test_vand_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vand_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_u64m8_mu( @@ -3171,6 +3171,6 @@ vuint64m8_t test_vand_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vand_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vand_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasub.c index 5e127beb4707..7a8aed11aaa2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasub.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vasub_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vasub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vasub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vasub_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vasub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vasub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vasub_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vasub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vasub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vasub_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vasub_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vasub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vasub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vasub_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vasub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vasub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vasub_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vasub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t 
test_vasub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vasub_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vasub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vasub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vasub_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vasub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vasub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vasub_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_tu( @@ -165,7 +165,7 @@ 
vint16mf2_t test_vasub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vasub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vasub_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vasub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vasub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vasub_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vasub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m4_tu( @@ -210,7 +210,7 @@ 
vint16m2_t test_vasub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vasub_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vasub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vasub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vasub_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vasub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vasub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vasub_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tu( @@ -255,7 +255,7 @@ 
vint32mf2_t test_vasub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vasub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vasub_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vasub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vasub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vasub_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vasub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m4_tu( @@ -300,7 +300,7 @@ 
vint32m2_t test_vasub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vasub_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vasub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vasub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vasub_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vasub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vasub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vasub_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m1_tu( @@ -345,7 +345,7 @@ 
vint64m1_t test_vasub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vasub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vasub_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vasub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vasub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vasub_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vasub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t 
test_vasub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vasub_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vasub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_tum( @@ -408,7 +408,7 @@ vint64m8_t test_vasub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vasub_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vasub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_tum( @@ -426,7 +426,7 @@ vint8mf8_t test_vasub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vasub_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8mf4_tum(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_tum( @@ -435,7 +435,7 @@ vint8mf4_t test_vasub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_tum( @@ -444,7 +444,7 @@ vint8mf4_t test_vasub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vasub_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_tum( @@ -453,7 +453,7 @@ vint8mf2_t test_vasub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m1_tum( @@ -462,7 +462,7 @@ vint8mf2_t test_vasub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vasub_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vasub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t 
op2, size_t vl) { - return vasub_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m2_tum( @@ -480,7 +480,7 @@ vint8m1_t test_vasub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vasub_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m2_tum( @@ -489,7 +489,7 @@ vint8m2_t test_vasub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m4_tum( @@ -498,7 +498,7 @@ vint8m2_t test_vasub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vasub_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m4_tum( @@ -507,7 +507,7 @@ vint8m4_t test_vasub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m8_tum( @@ -516,7 +516,7 @@ vint8m4_t test_vasub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vasub_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m8_tum( @@ -525,7 +525,7 @@ vint8m8_t test_vasub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_tum( @@ -534,7 +534,7 @@ vint8m8_t test_vasub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vasub_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_tum( @@ -543,7 +543,7 @@ vint16mf4_t test_vasub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_tum( @@ -552,7 +552,7 @@ vint16mf4_t test_vasub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vasub_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_tum( @@ -561,7 +561,7 @@ vint16mf2_t test_vasub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m1_tum( @@ -570,7 +570,7 @@ vint16mf2_t test_vasub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vasub_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m1_tum( @@ -579,7 +579,7 @@ vint16m1_t test_vasub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m2_tum( @@ -588,7 +588,7 @@ vint16m1_t test_vasub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vasub_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m2_tum( @@ -597,7 +597,7 @@ vint16m2_t test_vasub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, 
int16_t op2, size_t vl) { - return vasub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m4_tum( @@ -606,7 +606,7 @@ vint16m2_t test_vasub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vasub_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m4_tum( @@ -615,7 +615,7 @@ vint16m4_t test_vasub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m8_tum( @@ -624,7 +624,7 @@ vint16m4_t test_vasub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vasub_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m8_tum( @@ -633,7 +633,7 @@ vint16m8_t test_vasub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tum( @@ -642,7 +642,7 @@ vint16m8_t test_vasub_vx_i16m8_tum(vbool2_t mask, 
vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vasub_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tum( @@ -651,7 +651,7 @@ vint32mf2_t test_vasub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m1_tum( @@ -660,7 +660,7 @@ vint32mf2_t test_vasub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vasub_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m1_tum( @@ -669,7 +669,7 @@ vint32m1_t test_vasub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m2_tum( @@ -678,7 +678,7 @@ vint32m1_t test_vasub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vasub_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vasub_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m2_tum( @@ -687,7 +687,7 @@ vint32m2_t test_vasub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m4_tum( @@ -696,7 +696,7 @@ vint32m2_t test_vasub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vasub_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m4_tum( @@ -705,7 +705,7 @@ vint32m4_t test_vasub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m8_tum( @@ -714,7 +714,7 @@ vint32m4_t test_vasub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vasub_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m8_tum( @@ -723,7 +723,7 @@ vint32m8_t test_vasub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vasub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m1_tum( @@ -732,7 +732,7 @@ vint32m8_t test_vasub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vasub_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m1_tum( @@ -741,7 +741,7 @@ vint64m1_t test_vasub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m2_tum( @@ -750,7 +750,7 @@ vint64m1_t test_vasub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vasub_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m2_tum( @@ -759,7 +759,7 @@ vint64m2_t test_vasub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vasub_vv_i64m4_tum( @@ -768,7 +768,7 @@ vint64m2_t test_vasub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vasub_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m4_tum( @@ -777,7 +777,7 @@ vint64m4_t test_vasub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m8_tum( @@ -786,7 +786,7 @@ vint64m4_t test_vasub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vasub_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m8_tum( @@ -795,7 +795,7 @@ vint64m8_t test_vasub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_tumu( @@ -804,7 +804,7 @@ vint64m8_t test_vasub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t 
vl) { - return vasub_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vasub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_tumu( @@ -822,7 +822,7 @@ vint8mf8_t test_vasub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vasub_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_tumu( @@ -831,7 +831,7 @@ vint8mf4_t test_vasub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_tumu( @@ -840,7 +840,7 @@ vint8mf4_t test_vasub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vasub_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_tumu( @@ -849,7 +849,7 @@ vint8mf2_t test_vasub_vv_i8mf2_tumu(vbool16_t mask, 
vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m1_tumu( @@ -858,7 +858,7 @@ vint8mf2_t test_vasub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vasub_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m1_tumu( @@ -867,7 +867,7 @@ vint8m1_t test_vasub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m2_tumu( @@ -876,7 +876,7 @@ vint8m1_t test_vasub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vasub_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m2_tumu( @@ -885,7 +885,7 @@ vint8m2_t test_vasub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m2_tumu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m4_tumu( @@ -894,7 +894,7 @@ vint8m2_t test_vasub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vasub_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m4_tumu( @@ -903,7 +903,7 @@ vint8m4_t test_vasub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m8_tumu( @@ -912,7 +912,7 @@ vint8m4_t test_vasub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vasub_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m8_tumu( @@ -921,7 +921,7 @@ vint8m8_t test_vasub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_tumu( @@ -930,7 +930,7 @@ vint8m8_t test_vasub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, 
vint16mf4_t op2, size_t vl) { - return vasub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_tumu( @@ -939,7 +939,7 @@ vint16mf4_t test_vasub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_tumu( @@ -948,7 +948,7 @@ vint16mf4_t test_vasub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vasub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_tumu( @@ -957,7 +957,7 @@ vint16mf2_t test_vasub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m1_tumu( @@ -966,7 +966,7 @@ vint16mf2_t test_vasub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vasub_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m1_tumu( @@ -975,7 +975,7 @@ 
vint16m1_t test_vasub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m2_tumu( @@ -984,7 +984,7 @@ vint16m1_t test_vasub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vasub_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m2_tumu( @@ -993,7 +993,7 @@ vint16m2_t test_vasub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m4_tumu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vasub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vasub_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m4_tumu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vasub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return 
vasub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m8_tumu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vasub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vasub_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m8_tumu( @@ -1029,7 +1029,7 @@ vint16m8_t test_vasub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_tumu( @@ -1038,7 +1038,7 @@ vint16m8_t test_vasub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vasub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_tumu( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vasub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m1_tumu( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vasub_vx_i32mf2_tumu(vbool64_t 
mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vasub_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m1_tumu( @@ -1065,7 +1065,7 @@ vint32m1_t test_vasub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m2_tumu( @@ -1074,7 +1074,7 @@ vint32m1_t test_vasub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vasub_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m2_tumu( @@ -1083,7 +1083,7 @@ vint32m2_t test_vasub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m4_tumu( @@ -1092,7 +1092,7 @@ vint32m2_t test_vasub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vasub_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vasub_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m4_tumu( @@ -1101,7 +1101,7 @@ vint32m4_t test_vasub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m8_tumu( @@ -1110,7 +1110,7 @@ vint32m4_t test_vasub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vasub_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m8_tumu( @@ -1119,7 +1119,7 @@ vint32m8_t test_vasub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m1_tumu( @@ -1128,7 +1128,7 @@ vint32m8_t test_vasub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vasub_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m1_tumu( @@ -1137,7 +1137,7 @@ vint64m1_t test_vasub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m1_t test_vasub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m2_tumu( @@ -1146,7 +1146,7 @@ vint64m1_t test_vasub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vasub_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m2_tumu( @@ -1155,7 +1155,7 @@ vint64m2_t test_vasub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m4_tumu( @@ -1164,7 +1164,7 @@ vint64m2_t test_vasub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vasub_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m4_tumu( @@ -1173,7 +1173,7 @@ vint64m4_t test_vasub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vasub_vv_i64m8_tumu( @@ -1182,7 +1182,7 @@ vint64m4_t test_vasub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vasub_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m8_tumu( @@ -1191,7 +1191,7 @@ vint64m8_t test_vasub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_mu( @@ -1200,7 +1200,7 @@ vint64m8_t test_vasub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vasub_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vasub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_mu( @@ -1218,7 +1218,7 @@ vint8mf8_t test_vasub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, 
vint8mf4_t op2, size_t vl) { - return vasub_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_mu( @@ -1227,7 +1227,7 @@ vint8mf4_t test_vasub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_mu( @@ -1236,7 +1236,7 @@ vint8mf4_t test_vasub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vasub_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_mu( @@ -1245,7 +1245,7 @@ vint8mf2_t test_vasub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m1_mu( @@ -1254,7 +1254,7 @@ vint8mf2_t test_vasub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vasub_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m1_mu( @@ -1263,7 +1263,7 @@ vint8m1_t test_vasub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, 
vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m2_mu( @@ -1272,7 +1272,7 @@ vint8m1_t test_vasub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vasub_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m2_mu( @@ -1281,7 +1281,7 @@ vint8m2_t test_vasub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m4_mu( @@ -1290,7 +1290,7 @@ vint8m2_t test_vasub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vasub_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m4_mu( @@ -1299,7 +1299,7 @@ vint8m4_t test_vasub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vasub_vv_i8m8_mu( @@ -1308,7 +1308,7 @@ vint8m4_t test_vasub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vasub_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m8_mu( @@ -1317,7 +1317,7 @@ vint8m8_t test_vasub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vasub_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_mu( @@ -1326,7 +1326,7 @@ vint8m8_t test_vasub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vasub_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_mu( @@ -1335,7 +1335,7 @@ vint16mf4_t test_vasub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_mu( @@ -1344,7 +1344,7 @@ vint16mf4_t test_vasub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - 
return vasub_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_mu( @@ -1353,7 +1353,7 @@ vint16mf2_t test_vasub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m1_mu( @@ -1362,7 +1362,7 @@ vint16mf2_t test_vasub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vasub_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m1_mu( @@ -1371,7 +1371,7 @@ vint16m1_t test_vasub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m2_mu( @@ -1380,7 +1380,7 @@ vint16m1_t test_vasub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vasub_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m2_mu( @@ -1389,7 +1389,7 @@ vint16m2_t test_vasub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m4_mu( @@ -1398,7 +1398,7 @@ vint16m2_t test_vasub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vasub_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m4_mu( @@ -1407,7 +1407,7 @@ vint16m4_t test_vasub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m8_mu( @@ -1416,7 +1416,7 @@ vint16m4_t test_vasub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vasub_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m8_mu( @@ -1425,7 +1425,7 @@ vint16m8_t test_vasub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vasub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_mu( @@ -1434,7 +1434,7 @@ vint16m8_t test_vasub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vasub_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_mu( @@ -1443,7 +1443,7 @@ vint32mf2_t test_vasub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m1_mu( @@ -1452,7 +1452,7 @@ vint32mf2_t test_vasub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vasub_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m1_mu( @@ -1461,7 +1461,7 @@ vint32m1_t test_vasub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m2_mu( @@ -1470,7 +1470,7 @@ vint32m1_t test_vasub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, 
vint32m2_t op2, size_t vl) { - return vasub_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m2_mu( @@ -1479,7 +1479,7 @@ vint32m2_t test_vasub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m4_mu( @@ -1488,7 +1488,7 @@ vint32m2_t test_vasub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vasub_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m4_mu( @@ -1497,7 +1497,7 @@ vint32m4_t test_vasub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m8_mu( @@ -1506,7 +1506,7 @@ vint32m4_t test_vasub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vasub_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m8_mu( @@ -1515,7 +1515,7 @@ vint32m8_t test_vasub_vv_i32m8_mu(vbool4_t mask, vint32m8_t 
maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vasub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m1_mu( @@ -1524,7 +1524,7 @@ vint32m8_t test_vasub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vasub_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m1_mu( @@ -1533,7 +1533,7 @@ vint64m1_t test_vasub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m2_mu( @@ -1542,7 +1542,7 @@ vint64m1_t test_vasub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vasub_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m2_mu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vasub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m2_mu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m4_mu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vasub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vasub_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m4_mu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vasub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m8_mu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vasub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vasub_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m8_mu( @@ -1587,6 +1587,6 @@ vint64m8_t test_vasub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vasub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasubu.c index 91dea072e148..939683475240 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vasubu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vasubu_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_tu( @@ -30,7 +30,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vasubu_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_tu( @@ -39,7 +39,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_tu( @@ -48,7 +48,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vasubu_vv_u8mf2_tu(maskedoff, op1, 
op2, vl); + return __riscv_vasubu_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_tu( @@ -57,7 +57,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_tu( @@ -66,7 +66,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vasubu_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vasubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_tu( @@ -84,7 +84,7 @@ vuint8m1_t test_vasubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vasubu_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_tu( @@ -93,7 +93,7 @@ vuint8m2_t test_vasubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m2_tu(maskedoff, op1, op2, vl); + 
return __riscv_vasubu_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_tu( @@ -102,7 +102,7 @@ vuint8m2_t test_vasubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vasubu_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_tu( @@ -111,7 +111,7 @@ vuint8m4_t test_vasubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vasubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vasubu_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_tu( @@ -129,7 +129,7 @@ vuint8m8_t test_vasubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_tu( @@ -138,7 +138,7 @@ vuint8m8_t test_vasubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vasubu_vv_u16mf4_tu(maskedoff, op1, op2, 
vl); + return __riscv_vasubu_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_tu( @@ -147,7 +147,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_tu( @@ -156,7 +156,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vasubu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_tu( @@ -165,7 +165,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_tu( @@ -174,7 +174,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vasubu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_tu( @@ -183,7 +183,7 @@ vuint16m1_t test_vasubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) 
{ - return vasubu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_tu( @@ -192,7 +192,7 @@ vuint16m1_t test_vasubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vasubu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_tu( @@ -201,7 +201,7 @@ vuint16m2_t test_vasubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_tu( @@ -210,7 +210,7 @@ vuint16m2_t test_vasubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vasubu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_tu( @@ -219,7 +219,7 @@ vuint16m4_t test_vasubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_tu( @@ -228,7 +228,7 @@ vuint16m4_t test_vasubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8_tu(vuint16m8_t maskedoff, 
vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vasubu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_tu( @@ -237,7 +237,7 @@ vuint16m8_t test_vasubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tu( @@ -246,7 +246,7 @@ vuint16m8_t test_vasubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vasubu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tu( @@ -255,7 +255,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_tu( @@ -264,7 +264,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vasubu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_tu( @@ -273,7 +273,7 @@ vuint32m1_t test_vasubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vasubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_tu( @@ -282,7 +282,7 @@ vuint32m1_t test_vasubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vasubu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_tu( @@ -291,7 +291,7 @@ vuint32m2_t test_vasubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_tu( @@ -300,7 +300,7 @@ vuint32m2_t test_vasubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vasubu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_tu( @@ -309,7 +309,7 @@ vuint32m4_t test_vasubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_tu( @@ -318,7 +318,7 @@ vuint32m4_t test_vasubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vasubu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_tu( @@ -327,7 +327,7 @@ vuint32m8_t test_vasubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_tu( @@ -336,7 +336,7 @@ vuint32m8_t test_vasubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vasubu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_tu( @@ -345,7 +345,7 @@ vuint64m1_t test_vasubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_tu( @@ -354,7 +354,7 @@ vuint64m1_t test_vasubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vasubu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_tu( @@ -363,7 +363,7 @@ vuint64m2_t test_vasubu_vv_u64m2_tu(vuint64m2_t 
maskedoff, vuint64m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_tu( @@ -372,7 +372,7 @@ vuint64m2_t test_vasubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vasubu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_tu( @@ -381,7 +381,7 @@ vuint64m4_t test_vasubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_tu( @@ -390,7 +390,7 @@ vuint64m4_t test_vasubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vasubu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m8_t test_vasubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t 
test_vasubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vasubu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_tum( @@ -417,7 +417,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_tum( @@ -426,7 +426,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vasubu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_tum( @@ -435,7 +435,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_tum( @@ -444,7 +444,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return 
vasubu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_tum( @@ -453,7 +453,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_tum( @@ -462,7 +462,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vasubu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_tum( @@ -471,7 +471,7 @@ vuint8m1_t test_vasubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_tum( @@ -480,7 +480,7 @@ vuint8m1_t test_vasubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vasubu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_tum( @@ -489,7 +489,7 @@ vuint8m2_t test_vasubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_tum( @@ -498,7 +498,7 @@ vuint8m2_t test_vasubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vasubu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_tum( @@ -507,7 +507,7 @@ vuint8m4_t test_vasubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_tum( @@ -516,7 +516,7 @@ vuint8m4_t test_vasubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vasubu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_tum( @@ -525,7 +525,7 @@ vuint8m8_t test_vasubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_tum( @@ -534,7 +534,7 @@ vuint8m8_t test_vasubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vasubu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_tum( @@ -543,7 +543,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_tum( @@ -552,7 +552,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vasubu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_tum( @@ -561,7 +561,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_tum( @@ -570,7 +570,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vasubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vasubu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_tum( @@ -579,7 +579,7 @@ vuint16m1_t test_vasubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_tum( @@ -588,7 +588,7 @@ vuint16m1_t test_vasubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vasubu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_tum( @@ -597,7 +597,7 @@ vuint16m2_t test_vasubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_tum( @@ -606,7 +606,7 @@ vuint16m2_t test_vasubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vasubu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_tum( @@ -615,7 +615,7 @@ vuint16m4_t test_vasubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_tum( @@ -624,7 +624,7 @@ vuint16m4_t test_vasubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vasubu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_tum( @@ -633,7 +633,7 @@ vuint16m8_t test_vasubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tum( @@ -642,7 +642,7 @@ vuint16m8_t test_vasubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vasubu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tum( @@ -651,7 +651,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2_tum(vbool64_t mask, 
vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_tum( @@ -660,7 +660,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vasubu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_tum( @@ -669,7 +669,7 @@ vuint32m1_t test_vasubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_tum( @@ -678,7 +678,7 @@ vuint32m1_t test_vasubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vasubu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_tum( @@ -687,7 +687,7 @@ vuint32m2_t test_vasubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vasubu_vv_u32m4_tum( @@ -696,7 +696,7 @@ vuint32m2_t test_vasubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vasubu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_tum( @@ -705,7 +705,7 @@ vuint32m4_t test_vasubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_tum( @@ -714,7 +714,7 @@ vuint32m4_t test_vasubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vasubu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_tum( @@ -723,7 +723,7 @@ vuint32m8_t test_vasubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_tum( @@ -732,7 +732,7 @@ vuint32m8_t test_vasubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, 
vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vasubu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_tum( @@ -741,7 +741,7 @@ vuint64m1_t test_vasubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_tum( @@ -750,7 +750,7 @@ vuint64m1_t test_vasubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vasubu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_tum( @@ -759,7 +759,7 @@ vuint64m2_t test_vasubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_tum( @@ -768,7 +768,7 @@ vuint64m2_t test_vasubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vasubu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_tum( @@ -777,7 +777,7 
@@ vuint64m4_t test_vasubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_tum( @@ -786,7 +786,7 @@ vuint64m4_t test_vasubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vasubu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m8_t test_vasubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vasubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vasubu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_tumu( @@ -813,7 +813,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - 
return vasubu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_tumu( @@ -822,7 +822,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vasubu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_tumu( @@ -831,7 +831,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_tumu( @@ -840,7 +840,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vasubu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_tumu( @@ -849,7 +849,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_tumu( @@ -858,7 +858,7 @@ vuint8mf2_t 
test_vasubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vasubu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_tumu( @@ -867,7 +867,7 @@ vuint8m1_t test_vasubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_tumu( @@ -876,7 +876,7 @@ vuint8m1_t test_vasubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vasubu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_tumu( @@ -885,7 +885,7 @@ vuint8m2_t test_vasubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_tumu( @@ -894,7 +894,7 @@ vuint8m2_t test_vasubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vasubu_vv_u8m4_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_tumu( @@ -903,7 +903,7 @@ vuint8m4_t test_vasubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_tumu( @@ -912,7 +912,7 @@ vuint8m4_t test_vasubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vasubu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_tumu( @@ -921,7 +921,7 @@ vuint8m8_t test_vasubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_tumu( @@ -930,7 +930,7 @@ vuint8m8_t test_vasubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vasubu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_tumu( @@ -939,7 +939,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_tumu( @@ -948,7 +948,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vasubu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_tumu( @@ -957,7 +957,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_tumu( @@ -966,7 +966,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vasubu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_tumu( @@ -975,7 +975,7 @@ vuint16m1_t test_vasubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m1_tumu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vasubu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_tumu( @@ -984,7 +984,7 @@ vuint16m1_t test_vasubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vasubu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_tumu( @@ -993,7 +993,7 @@ vuint16m2_t test_vasubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_tumu( @@ -1002,7 +1002,7 @@ vuint16m2_t test_vasubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vasubu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_tumu( @@ -1011,7 +1011,7 @@ vuint16m4_t test_vasubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_tumu( @@ -1020,7 +1020,7 @@ vuint16m4_t test_vasubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, 
vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vasubu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_tumu( @@ -1029,7 +1029,7 @@ vuint16m8_t test_vasubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_tumu( @@ -1038,7 +1038,7 @@ vuint16m8_t test_vasubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vasubu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_tumu( @@ -1047,7 +1047,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_tumu( @@ -1056,7 +1056,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vasubu_vv_u32m1_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_tumu( @@ -1065,7 +1065,7 @@ vuint32m1_t test_vasubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_tumu( @@ -1074,7 +1074,7 @@ vuint32m1_t test_vasubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vasubu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_tumu( @@ -1083,7 +1083,7 @@ vuint32m2_t test_vasubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_tumu( @@ -1092,7 +1092,7 @@ vuint32m2_t test_vasubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vasubu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_tumu( @@ -1101,7 +1101,7 @@ vuint32m4_t test_vasubu_vv_u32m4_tumu(vbool8_t mask, 
vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_tumu( @@ -1110,7 +1110,7 @@ vuint32m4_t test_vasubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vasubu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_tumu( @@ -1119,7 +1119,7 @@ vuint32m8_t test_vasubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_tumu( @@ -1128,7 +1128,7 @@ vuint32m8_t test_vasubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vasubu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_tumu( @@ -1137,7 +1137,7 @@ vuint64m1_t test_vasubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m1_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_tumu( @@ -1146,7 +1146,7 @@ vuint64m1_t test_vasubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vasubu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_tumu( @@ -1155,7 +1155,7 @@ vuint64m2_t test_vasubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_tumu( @@ -1164,7 +1164,7 @@ vuint64m2_t test_vasubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vasubu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_tumu( @@ -1173,7 +1173,7 @@ vuint64m4_t test_vasubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_tumu( @@ -1182,7 +1182,7 @@ vuint64m4_t test_vasubu_vx_u64m4_tumu(vbool16_t 
mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vasubu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m8_t test_vasubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t test_vasubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vasubu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_mu( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_mu( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vasubu_vv_u8mf4_mu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vasubu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_mu( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_mu( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vasubu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_mu( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_mu( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vasubu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_mu( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vasubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m1_t test_vasubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_mu( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vasubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vasubu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_mu( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vasubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_mu( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vasubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vasubu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_mu( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vasubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_mu( @@ 
-1308,7 +1308,7 @@ vuint8m4_t test_vasubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vasubu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_mu( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vasubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vasubu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_mu( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vasubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vasubu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_mu( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_mu( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, 
size_t vl) { - return vasubu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_mu( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_mu( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vasubu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_mu( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vasubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_mu( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vasubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vasubu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_mu( @@ -1389,7 +1389,7 @@ vuint16m2_t 
test_vasubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_mu( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vasubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vasubu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_mu( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vasubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_mu( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vasubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vasubu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_mu( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vasubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vasubu_vx_u16m8_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_mu( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vasubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vasubu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_mu( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_mu( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vasubu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_mu( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vasubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_mu( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vasubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t 
maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vasubu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_mu( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vasubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_mu( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vasubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vasubu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_mu( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vasubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_mu( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vasubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vasubu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vasubu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_mu( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vasubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vasubu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_mu( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vasubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vasubu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_mu( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vasubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_mu( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vasubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vasubu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_mu( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vasubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m2_t test_vasubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_mu( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vasubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vasubu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_mu( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vasubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_mu( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vasubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vasubu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vasubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vasubu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vasubu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff 
--git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c index d3939878ae0d..980846387d58 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_f16mf4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f16mf4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_f16mf2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f16mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_f16m1_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f16m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_f16m2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f16m2_tu(maskedoff, src, mask, vl); } // 
CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vcompress_vm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_f16m4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f16m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vcompress_vm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, vbool2_t mask, size_t vl) { - return vcompress_vm_f16m8_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f16m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_f32mf2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f32mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_f32m1_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f32m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, vbool16_t mask, size_t vl) { - return 
vcompress_vm_f32m2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f32m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vcompress_vm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_f32m4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f32m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vcompress_vm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_f32m8_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f32m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_f64m1_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f64m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_f64m2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f64m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_f64m4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f64m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_f64m8_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_f64m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_tu( @@ -148,7 +148,7 @@ vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_i8mf8_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i8mf8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_tu( @@ -157,7 +157,7 @@ vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_i8mf4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i8mf4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_tu( @@ -166,7 +166,7 @@ vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vcompress_vm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_i8mf2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i8mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_tu( @@ -175,7 +175,7 @@ vint8mf2_t 
test_vcompress_vm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_i8m1_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i8m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_tu( @@ -184,7 +184,7 @@ vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, vbool8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_i8m2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i8m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_tu( @@ -193,7 +193,7 @@ vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, vbool4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, vbool2_t mask, size_t vl) { - return vcompress_vm_i8m4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i8m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_tu( @@ -202,7 +202,7 @@ vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, vbool2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, vbool1_t mask, size_t vl) { - return vcompress_vm_i8m8_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i8m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_tu( @@ -211,7 +211,7 @@ vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, vbool1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vcompress_vm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_i16mf4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i16mf4_tu(maskedoff, src, mask, vl); } // 
CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_tu( @@ -220,7 +220,7 @@ vint16mf4_t test_vcompress_vm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_i16mf2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i16mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_tu( @@ -229,7 +229,7 @@ vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_i16m1_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i16m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_tu( @@ -238,7 +238,7 @@ vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_i16m2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i16m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_tu( @@ -247,7 +247,7 @@ vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_i16m4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i16m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_tu( @@ -256,7 +256,7 @@ vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, vbool2_t mask, size_t vl) { - return 
vcompress_vm_i16m8_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i16m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu( @@ -265,7 +265,7 @@ vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_i32mf2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i32mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_tu( @@ -274,7 +274,7 @@ vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_i32m1_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i32m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_tu( @@ -283,7 +283,7 @@ vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_i32m2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i32m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_tu( @@ -292,7 +292,7 @@ vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_i32m4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i32m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_tu( @@ -301,7 +301,7 @@ vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vcompress_vm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_i32m8_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i32m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_tu( @@ -310,7 +310,7 @@ vint32m8_t test_vcompress_vm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_i64m1_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i64m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_tu( @@ -319,7 +319,7 @@ vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_i64m2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i64m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_tu( @@ -328,7 +328,7 @@ vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_i64m4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i64m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_tu( @@ -337,7 +337,7 @@ vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vcompress_vm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_i64m8_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_i64m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_tu( @@ -346,7 +346,7 @@ vint64m8_t 
test_vcompress_vm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_u8mf8_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u8mf8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_tu( @@ -355,7 +355,7 @@ vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_u8mf4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u8mf4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_tu( @@ -364,7 +364,7 @@ vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_u8mf2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u8mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_tu( @@ -373,7 +373,7 @@ vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_u8m1_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u8m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_tu( @@ -382,7 +382,7 @@ vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_u8m2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u8m2_tu(maskedoff, src, 
mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_tu( @@ -391,7 +391,7 @@ vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, vbool2_t mask, size_t vl) { - return vcompress_vm_u8m4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u8m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_tu( @@ -400,7 +400,7 @@ vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, vbool1_t mask, size_t vl) { - return vcompress_vm_u8m8_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u8m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_tu( @@ -409,7 +409,7 @@ vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_u16mf4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u16mf4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_tu( @@ -418,7 +418,7 @@ vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_u16mf2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u16mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_tu( @@ -427,7 +427,7 @@ vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, vbool16_t mask, size_t vl) { - return 
vcompress_vm_u16m1_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u16m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_tu( @@ -436,7 +436,7 @@ vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_u16m2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u16m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_tu( @@ -445,7 +445,7 @@ vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_u16m4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u16m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_tu( @@ -454,7 +454,7 @@ vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, vbool2_t mask, size_t vl) { - return vcompress_vm_u16m8_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u16m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu( @@ -463,7 +463,7 @@ vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_u32mf2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u32mf2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_tu( @@ -472,7 +472,7 @@ vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_u32m1_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u32m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_tu( @@ -481,7 +481,7 @@ vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_u32m2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u32m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_tu( @@ -490,7 +490,7 @@ vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_u32m4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u32m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_tu( @@ -499,7 +499,7 @@ vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, vbool4_t mask, size_t vl) { - return vcompress_vm_u32m8_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u32m8_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_tu( @@ -508,7 +508,7 @@ vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vcompress_vm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, vbool64_t mask, size_t vl) { - return vcompress_vm_u64m1_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u64m1_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_tu( @@ -517,7 +517,7 @@ vuint64m1_t 
test_vcompress_vm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, vbool32_t mask, size_t vl) { - return vcompress_vm_u64m2_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u64m2_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_tu( @@ -526,7 +526,7 @@ vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, vbool16_t mask, size_t vl) { - return vcompress_vm_u64m4_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u64m4_tu(maskedoff, src, mask, vl); } // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_tu( @@ -535,6 +535,6 @@ vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, vbool8_t mask, size_t vl) { - return vcompress_vm_u64m8_tu(maskedoff, src, mask, vl); + return __riscv_vcompress_vm_u64m8_tu(maskedoff, src, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdiv.c index c9b267a0d0c0..4b5af4e01d53 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdiv.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vdiv_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vdiv_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vdiv_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vdiv_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vdiv_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vdiv_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vdiv_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vdiv_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vdiv_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vdiv_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vdiv_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vdiv_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vdiv_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vdiv_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vdiv_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vdiv_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vdiv_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vdiv_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, 
size_t vl) { - return vdiv_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vdiv_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vdiv_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vdiv_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vdiv_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vdiv_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vdiv_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vdiv_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return 
vdiv_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vdiv_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vdiv_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vdiv_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vdiv_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vdiv_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vdiv_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vdiv_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return 
vdiv_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vdiv_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vdiv_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vdiv_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vdiv_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vdiv_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vdiv_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vdiv_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return 
vdiv_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vdiv_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vdiv_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vdiv_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vdiv_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vdiv_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return 
vdiv_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vdiv_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vdiv_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vdiv_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vdiv_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vdiv_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vdiv_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vdiv_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return 
vdiv_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vdiv_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vdiv_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vdiv_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vdiv_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vdiv_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vdiv_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vdiv_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m4_tu(maskedoff, 
op1, op2, vl); + return __riscv_vdiv_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vdiv_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vdiv_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vdiv_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_tum( @@ -408,7 +408,7 @@ vint64m8_t test_vdiv_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vdiv_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vdiv_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_tum( @@ -426,7 +426,7 @@ vint8mf8_t test_vdiv_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t 
op2, size_t vl) { - return vdiv_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_tum( @@ -435,7 +435,7 @@ vint8mf4_t test_vdiv_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_tum( @@ -444,7 +444,7 @@ vint8mf4_t test_vdiv_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vdiv_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_tum( @@ -453,7 +453,7 @@ vint8mf2_t test_vdiv_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_tum( @@ -462,7 +462,7 @@ vint8mf2_t test_vdiv_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vdiv_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vdiv_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_tum( @@ -480,7 +480,7 @@ vint8m1_t test_vdiv_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vdiv_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_tum( @@ -489,7 +489,7 @@ vint8m2_t test_vdiv_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_tum( @@ -498,7 +498,7 @@ vint8m2_t test_vdiv_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vdiv_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_tum( @@ -507,7 +507,7 @@ vint8m4_t test_vdiv_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_tum( @@ -516,7 +516,7 
@@ vint8m4_t test_vdiv_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vdiv_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_tum( @@ -525,7 +525,7 @@ vint8m8_t test_vdiv_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_tum( @@ -534,7 +534,7 @@ vint8m8_t test_vdiv_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vdiv_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_tum( @@ -543,7 +543,7 @@ vint16mf4_t test_vdiv_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_tum( @@ -552,7 +552,7 @@ vint16mf4_t test_vdiv_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vdiv_vv_i16mf2_tum(mask, maskedoff, op1, 
op2, vl); + return __riscv_vdiv_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_tum( @@ -561,7 +561,7 @@ vint16mf2_t test_vdiv_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_tum( @@ -570,7 +570,7 @@ vint16mf2_t test_vdiv_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vdiv_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_tum( @@ -579,7 +579,7 @@ vint16m1_t test_vdiv_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_tum( @@ -588,7 +588,7 @@ vint16m1_t test_vdiv_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vdiv_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_tum( @@ -597,7 +597,7 @@ vint16m2_t test_vdiv_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vdiv_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_tum( @@ -606,7 +606,7 @@ vint16m2_t test_vdiv_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vdiv_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_tum( @@ -615,7 +615,7 @@ vint16m4_t test_vdiv_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_tum( @@ -624,7 +624,7 @@ vint16m4_t test_vdiv_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vdiv_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_tum( @@ -633,7 +633,7 @@ vint16m8_t test_vdiv_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tum( @@ -642,7 +642,7 @@ 
vint16m8_t test_vdiv_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vdiv_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tum( @@ -651,7 +651,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_tum( @@ -660,7 +660,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vdiv_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_tum( @@ -669,7 +669,7 @@ vint32m1_t test_vdiv_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_tum( @@ -678,7 +678,7 @@ vint32m1_t test_vdiv_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vdiv_vv_i32m2_tum(mask, maskedoff, 
op1, op2, vl); + return __riscv_vdiv_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_tum( @@ -687,7 +687,7 @@ vint32m2_t test_vdiv_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_tum( @@ -696,7 +696,7 @@ vint32m2_t test_vdiv_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vdiv_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_tum( @@ -705,7 +705,7 @@ vint32m4_t test_vdiv_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_tum( @@ -714,7 +714,7 @@ vint32m4_t test_vdiv_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vdiv_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_tum( @@ -723,7 +723,7 @@ vint32m8_t test_vdiv_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vdiv_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_tum( @@ -732,7 +732,7 @@ vint32m8_t test_vdiv_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vdiv_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_tum( @@ -741,7 +741,7 @@ vint64m1_t test_vdiv_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_tum( @@ -750,7 +750,7 @@ vint64m1_t test_vdiv_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vdiv_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_tum( @@ -759,7 +759,7 @@ vint64m2_t test_vdiv_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_tum( @@ -768,7 +768,7 
@@ vint64m2_t test_vdiv_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vdiv_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_tum( @@ -777,7 +777,7 @@ vint64m4_t test_vdiv_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_tum( @@ -786,7 +786,7 @@ vint64m4_t test_vdiv_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vdiv_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_tum( @@ -795,7 +795,7 @@ vint64m8_t test_vdiv_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_tumu( @@ -804,7 +804,7 @@ vint64m8_t test_vdiv_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vdiv_vv_i8mf8_tumu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vdiv_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vdiv_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_tumu( @@ -822,7 +822,7 @@ vint8mf8_t test_vdiv_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vdiv_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_tumu( @@ -831,7 +831,7 @@ vint8mf4_t test_vdiv_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_tumu( @@ -840,7 +840,7 @@ vint8mf4_t test_vdiv_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vdiv_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_tumu( @@ -849,7 +849,7 @@ vint8mf2_t test_vdiv_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vdiv_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_tumu( @@ -858,7 +858,7 @@ vint8mf2_t test_vdiv_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vdiv_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_tumu( @@ -867,7 +867,7 @@ vint8m1_t test_vdiv_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_tumu( @@ -876,7 +876,7 @@ vint8m1_t test_vdiv_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vdiv_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_tumu( @@ -885,7 +885,7 @@ vint8m2_t test_vdiv_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_tumu( @@ -894,7 +894,7 @@ vint8m2_t 
test_vdiv_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vdiv_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_tumu( @@ -903,7 +903,7 @@ vint8m4_t test_vdiv_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_tumu( @@ -912,7 +912,7 @@ vint8m4_t test_vdiv_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vdiv_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_tumu( @@ -921,7 +921,7 @@ vint8m8_t test_vdiv_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_tumu( @@ -930,7 +930,7 @@ vint8m8_t test_vdiv_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vdiv_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vdiv_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_tumu( @@ -939,7 +939,7 @@ vint16mf4_t test_vdiv_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_tumu( @@ -948,7 +948,7 @@ vint16mf4_t test_vdiv_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vdiv_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_tumu( @@ -957,7 +957,7 @@ vint16mf2_t test_vdiv_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_tumu( @@ -966,7 +966,7 @@ vint16mf2_t test_vdiv_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vdiv_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_tumu( @@ -975,7 +975,7 @@ vint16m1_t test_vdiv_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m1_t test_vdiv_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_tumu( @@ -984,7 +984,7 @@ vint16m1_t test_vdiv_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vdiv_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_tumu( @@ -993,7 +993,7 @@ vint16m2_t test_vdiv_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_tumu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vdiv_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vdiv_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_tumu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vdiv_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vdiv_vv_i16m8_tumu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vdiv_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vdiv_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_tumu( @@ -1029,7 +1029,7 @@ vint16m8_t test_vdiv_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_tumu( @@ -1038,7 +1038,7 @@ vint16m8_t test_vdiv_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vdiv_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_tumu( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_tumu( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, 
vint32m1_t op2, size_t vl) { - return vdiv_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_tumu( @@ -1065,7 +1065,7 @@ vint32m1_t test_vdiv_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_tumu( @@ -1074,7 +1074,7 @@ vint32m1_t test_vdiv_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vdiv_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_tumu( @@ -1083,7 +1083,7 @@ vint32m2_t test_vdiv_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_tumu( @@ -1092,7 +1092,7 @@ vint32m2_t test_vdiv_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vdiv_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_tumu( @@ -1101,7 +1101,7 @@ vint32m4_t 
test_vdiv_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_tumu( @@ -1110,7 +1110,7 @@ vint32m4_t test_vdiv_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vdiv_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_tumu( @@ -1119,7 +1119,7 @@ vint32m8_t test_vdiv_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_tumu( @@ -1128,7 +1128,7 @@ vint32m8_t test_vdiv_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vdiv_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_tumu( @@ -1137,7 +1137,7 @@ vint64m1_t test_vdiv_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m1_tumu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vdiv_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_tumu( @@ -1146,7 +1146,7 @@ vint64m1_t test_vdiv_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vdiv_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_tumu( @@ -1155,7 +1155,7 @@ vint64m2_t test_vdiv_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_tumu( @@ -1164,7 +1164,7 @@ vint64m2_t test_vdiv_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vdiv_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_tumu( @@ -1173,7 +1173,7 @@ vint64m4_t test_vdiv_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_tumu( @@ -1182,7 +1182,7 @@ vint64m4_t test_vdiv_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m8_t test_vdiv_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vdiv_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_tumu( @@ -1191,7 +1191,7 @@ vint64m8_t test_vdiv_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_mu( @@ -1200,7 +1200,7 @@ vint64m8_t test_vdiv_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vdiv_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vdiv_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_mu( @@ -1218,7 +1218,7 @@ vint8mf8_t test_vdiv_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vdiv_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_mu( @@ 
-1227,7 +1227,7 @@ vint8mf4_t test_vdiv_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_mu( @@ -1236,7 +1236,7 @@ vint8mf4_t test_vdiv_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vdiv_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_mu( @@ -1245,7 +1245,7 @@ vint8mf2_t test_vdiv_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_mu( @@ -1254,7 +1254,7 @@ vint8mf2_t test_vdiv_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vdiv_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_mu( @@ -1263,7 +1263,7 @@ vint8m1_t test_vdiv_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vdiv_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_mu( @@ -1272,7 +1272,7 @@ vint8m1_t test_vdiv_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vdiv_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_mu( @@ -1281,7 +1281,7 @@ vint8m2_t test_vdiv_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_mu( @@ -1290,7 +1290,7 @@ vint8m2_t test_vdiv_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vdiv_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_mu( @@ -1299,7 +1299,7 @@ vint8m4_t test_vdiv_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_mu( @@ -1308,7 +1308,7 @@ vint8m4_t test_vdiv_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, 
size_t vl) { - return vdiv_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_mu( @@ -1317,7 +1317,7 @@ vint8m8_t test_vdiv_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vdiv_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_mu( @@ -1326,7 +1326,7 @@ vint8m8_t test_vdiv_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vdiv_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_mu( @@ -1335,7 +1335,7 @@ vint16mf4_t test_vdiv_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_mu( @@ -1344,7 +1344,7 @@ vint16mf4_t test_vdiv_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vdiv_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_mu( @@ -1353,7 +1353,7 @@ vint16mf2_t test_vdiv_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_mu( @@ -1362,7 +1362,7 @@ vint16mf2_t test_vdiv_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vdiv_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_mu( @@ -1371,7 +1371,7 @@ vint16m1_t test_vdiv_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_mu( @@ -1380,7 +1380,7 @@ vint16m1_t test_vdiv_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vdiv_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_mu( @@ -1389,7 +1389,7 @@ vint16m2_t test_vdiv_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vdiv_vv_i16m4_mu( @@ -1398,7 +1398,7 @@ vint16m2_t test_vdiv_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vdiv_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_mu( @@ -1407,7 +1407,7 @@ vint16m4_t test_vdiv_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_mu( @@ -1416,7 +1416,7 @@ vint16m4_t test_vdiv_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vdiv_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_mu( @@ -1425,7 +1425,7 @@ vint16m8_t test_vdiv_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vdiv_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_mu( @@ -1434,7 +1434,7 @@ vint16m8_t test_vdiv_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return 
vdiv_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_mu( @@ -1443,7 +1443,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_mu( @@ -1452,7 +1452,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vdiv_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_mu( @@ -1461,7 +1461,7 @@ vint32m1_t test_vdiv_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_mu( @@ -1470,7 +1470,7 @@ vint32m1_t test_vdiv_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vdiv_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_mu( @@ -1479,7 +1479,7 @@ vint32m2_t test_vdiv_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m2_t test_vdiv_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_mu( @@ -1488,7 +1488,7 @@ vint32m2_t test_vdiv_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vdiv_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_mu( @@ -1497,7 +1497,7 @@ vint32m4_t test_vdiv_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_mu( @@ -1506,7 +1506,7 @@ vint32m4_t test_vdiv_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vdiv_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_mu( @@ -1515,7 +1515,7 @@ vint32m8_t test_vdiv_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vdiv_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_mu( @@ 
-1524,7 +1524,7 @@ vint32m8_t test_vdiv_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vdiv_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_mu( @@ -1533,7 +1533,7 @@ vint64m1_t test_vdiv_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_mu( @@ -1542,7 +1542,7 @@ vint64m1_t test_vdiv_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vdiv_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_mu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vdiv_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_mu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vdiv_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vdiv_vv_i64m4_mu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vdiv_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_mu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vdiv_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_mu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vdiv_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vdiv_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_mu( @@ -1587,6 +1587,6 @@ vint64m8_t test_vdiv_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vdiv_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdiv_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdivu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdivu.c index 54780e4df0dc..20d4d357af99 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdivu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vdivu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vdivu_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return 
__riscv_vdivu_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_tu( @@ -30,7 +30,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vdivu_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_tu( @@ -39,7 +39,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_tu( @@ -48,7 +48,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vdivu_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_tu( @@ -57,7 +57,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return 
__riscv_vdivu_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_tu( @@ -66,7 +66,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vdivu_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vdivu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_tu( @@ -84,7 +84,7 @@ vuint8m1_t test_vdivu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vdivu_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_tu( @@ -93,7 +93,7 @@ vuint8m2_t test_vdivu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_tu( @@ -102,7 +102,7 @@ vuint8m2_t test_vdivu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vdivu_vv_u8m4_tu(maskedoff, op1, op2, vl); + return 
__riscv_vdivu_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_tu( @@ -111,7 +111,7 @@ vuint8m4_t test_vdivu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vdivu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vdivu_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_tu( @@ -129,7 +129,7 @@ vuint8m8_t test_vdivu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_tu( @@ -138,7 +138,7 @@ vuint8m8_t test_vdivu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vdivu_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_tu( @@ -147,7 +147,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return 
__riscv_vdivu_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_tu( @@ -156,7 +156,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vdivu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_tu( @@ -165,7 +165,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_tu( @@ -174,7 +174,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vdivu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_tu( @@ -183,7 +183,7 @@ vuint16m1_t test_vdivu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_tu( @@ -192,7 +192,7 @@ vuint16m1_t test_vdivu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return 
vdivu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_tu( @@ -201,7 +201,7 @@ vuint16m2_t test_vdivu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_tu( @@ -210,7 +210,7 @@ vuint16m2_t test_vdivu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vdivu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_tu( @@ -219,7 +219,7 @@ vuint16m4_t test_vdivu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_tu( @@ -228,7 +228,7 @@ vuint16m4_t test_vdivu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vdivu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_tu( @@ -237,7 +237,7 @@ vuint16m8_t test_vdivu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) 
{ - return vdivu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tu( @@ -246,7 +246,7 @@ vuint16m8_t test_vdivu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vdivu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tu( @@ -255,7 +255,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_tu( @@ -264,7 +264,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vdivu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_tu( @@ -273,7 +273,7 @@ vuint32m1_t test_vdivu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_tu( @@ -282,7 +282,7 @@ vuint32m1_t test_vdivu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t 
op1, vuint32m2_t op2, size_t vl) { - return vdivu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_tu( @@ -291,7 +291,7 @@ vuint32m2_t test_vdivu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_tu( @@ -300,7 +300,7 @@ vuint32m2_t test_vdivu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vdivu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_tu( @@ -309,7 +309,7 @@ vuint32m4_t test_vdivu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_tu( @@ -318,7 +318,7 @@ vuint32m4_t test_vdivu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vdivu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_tu( @@ -327,7 +327,7 @@ vuint32m8_t test_vdivu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8_tu(vuint32m8_t 
maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_tu( @@ -336,7 +336,7 @@ vuint32m8_t test_vdivu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vdivu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_tu( @@ -345,7 +345,7 @@ vuint64m1_t test_vdivu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_tu( @@ -354,7 +354,7 @@ vuint64m1_t test_vdivu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vdivu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_tu( @@ -363,7 +363,7 @@ vuint64m2_t test_vdivu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_tu( @@ -372,7 +372,7 @@ vuint64m2_t test_vdivu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vdivu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vdivu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_tu( @@ -381,7 +381,7 @@ vuint64m4_t test_vdivu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_tu( @@ -390,7 +390,7 @@ vuint64m4_t test_vdivu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vdivu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m8_t test_vdivu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t test_vdivu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vdivu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_tum( @@ -417,7 +417,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_tum( @@ -426,7 +426,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vdivu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_tum( @@ -435,7 +435,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_tum( @@ -444,7 +444,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vdivu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_tum( @@ -453,7 +453,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8mf2_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_tum( @@ -462,7 +462,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vdivu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_tum( @@ -471,7 +471,7 @@ vuint8m1_t test_vdivu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_tum( @@ -480,7 +480,7 @@ vuint8m1_t test_vdivu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vdivu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_tum( @@ -489,7 +489,7 @@ vuint8m2_t test_vdivu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_tum( @@ -498,7 +498,7 @@ vuint8m2_t test_vdivu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, 
vuint8m4_t op2, size_t vl) { - return vdivu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_tum( @@ -507,7 +507,7 @@ vuint8m4_t test_vdivu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_tum( @@ -516,7 +516,7 @@ vuint8m4_t test_vdivu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vdivu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_tum( @@ -525,7 +525,7 @@ vuint8m8_t test_vdivu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_tum( @@ -534,7 +534,7 @@ vuint8m8_t test_vdivu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vdivu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_tum( @@ -543,7 +543,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4_tum(vbool64_t mask, 
vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_tum( @@ -552,7 +552,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vdivu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_tum( @@ -561,7 +561,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_tum( @@ -570,7 +570,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vdivu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_tum( @@ -579,7 +579,7 @@ vuint16m1_t test_vdivu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m1_tum(mask, maskedoff, op1, op2, 
vl); + return __riscv_vdivu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_tum( @@ -588,7 +588,7 @@ vuint16m1_t test_vdivu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vdivu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_tum( @@ -597,7 +597,7 @@ vuint16m2_t test_vdivu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_tum( @@ -606,7 +606,7 @@ vuint16m2_t test_vdivu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vdivu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_tum( @@ -615,7 +615,7 @@ vuint16m4_t test_vdivu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_tum( @@ -624,7 +624,7 @@ vuint16m4_t test_vdivu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m8_t test_vdivu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vdivu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_tum( @@ -633,7 +633,7 @@ vuint16m8_t test_vdivu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tum( @@ -642,7 +642,7 @@ vuint16m8_t test_vdivu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vdivu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tum( @@ -651,7 +651,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_tum( @@ -660,7 +660,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vdivu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m1_tum(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_tum( @@ -669,7 +669,7 @@ vuint32m1_t test_vdivu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_tum( @@ -678,7 +678,7 @@ vuint32m1_t test_vdivu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vdivu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_tum( @@ -687,7 +687,7 @@ vuint32m2_t test_vdivu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_tum( @@ -696,7 +696,7 @@ vuint32m2_t test_vdivu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vdivu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_tum( @@ -705,7 +705,7 @@ vuint32m4_t test_vdivu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t 
maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_tum( @@ -714,7 +714,7 @@ vuint32m4_t test_vdivu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vdivu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_tum( @@ -723,7 +723,7 @@ vuint32m8_t test_vdivu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_tum( @@ -732,7 +732,7 @@ vuint32m8_t test_vdivu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vdivu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_tum( @@ -741,7 +741,7 @@ vuint64m1_t test_vdivu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_tum( @@ -750,7 +750,7 @@ vuint64m1_t 
test_vdivu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vdivu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_tum( @@ -759,7 +759,7 @@ vuint64m2_t test_vdivu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_tum( @@ -768,7 +768,7 @@ vuint64m2_t test_vdivu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vdivu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_tum( @@ -777,7 +777,7 @@ vuint64m4_t test_vdivu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_tum( @@ -786,7 +786,7 @@ vuint64m4_t test_vdivu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vdivu_vv_u64m8_tum(mask, 
maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m8_t test_vdivu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vdivu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vdivu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_tumu( @@ -813,7 +813,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_tumu( @@ -822,7 +822,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vdivu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_tumu( @@ -831,7 +831,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_tumu( @@ -840,7 +840,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vdivu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_tumu( @@ -849,7 +849,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_tumu( @@ -858,7 +858,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vdivu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_tumu( @@ -867,7 +867,7 @@ vuint8m1_t test_vdivu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m1_tumu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_tumu( @@ -876,7 +876,7 @@ vuint8m1_t test_vdivu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vdivu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_tumu( @@ -885,7 +885,7 @@ vuint8m2_t test_vdivu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_tumu( @@ -894,7 +894,7 @@ vuint8m2_t test_vdivu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vdivu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_tumu( @@ -903,7 +903,7 @@ vuint8m4_t test_vdivu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_tumu( @@ -912,7 +912,7 @@ vuint8m4_t test_vdivu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t 
maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vdivu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_tumu( @@ -921,7 +921,7 @@ vuint8m8_t test_vdivu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_tumu( @@ -930,7 +930,7 @@ vuint8m8_t test_vdivu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vdivu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_tumu( @@ -939,7 +939,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_tumu( @@ -948,7 +948,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vdivu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vdivu_vx_u16mf2_tumu( @@ -957,7 +957,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_tumu( @@ -966,7 +966,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vdivu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_tumu( @@ -975,7 +975,7 @@ vuint16m1_t test_vdivu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_tumu( @@ -984,7 +984,7 @@ vuint16m1_t test_vdivu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vdivu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_tumu( @@ -993,7 +993,7 @@ vuint16m2_t test_vdivu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, 
vuint16m2_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_tumu( @@ -1002,7 +1002,7 @@ vuint16m2_t test_vdivu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vdivu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_tumu( @@ -1011,7 +1011,7 @@ vuint16m4_t test_vdivu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_tumu( @@ -1020,7 +1020,7 @@ vuint16m4_t test_vdivu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vdivu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_tumu( @@ -1029,7 +1029,7 @@ vuint16m8_t test_vdivu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_tumu( @@ -1038,7 
+1038,7 @@ vuint16m8_t test_vdivu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vdivu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_tumu( @@ -1047,7 +1047,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_tumu( @@ -1056,7 +1056,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vdivu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_tumu( @@ -1065,7 +1065,7 @@ vuint32m1_t test_vdivu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_tumu( @@ -1074,7 +1074,7 @@ vuint32m1_t test_vdivu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, 
vuint32m2_t op2, size_t vl) { - return vdivu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_tumu( @@ -1083,7 +1083,7 @@ vuint32m2_t test_vdivu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_tumu( @@ -1092,7 +1092,7 @@ vuint32m2_t test_vdivu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vdivu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_tumu( @@ -1101,7 +1101,7 @@ vuint32m4_t test_vdivu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_tumu( @@ -1110,7 +1110,7 @@ vuint32m4_t test_vdivu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vdivu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_tumu( @@ -1119,7 +1119,7 @@ 
vuint32m8_t test_vdivu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_tumu( @@ -1128,7 +1128,7 @@ vuint32m8_t test_vdivu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vdivu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_tumu( @@ -1137,7 +1137,7 @@ vuint64m1_t test_vdivu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_tumu( @@ -1146,7 +1146,7 @@ vuint64m1_t test_vdivu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vdivu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_tumu( @@ -1155,7 +1155,7 @@ vuint64m2_t test_vdivu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - 
return vdivu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_tumu( @@ -1164,7 +1164,7 @@ vuint64m2_t test_vdivu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vdivu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_tumu( @@ -1173,7 +1173,7 @@ vuint64m4_t test_vdivu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_tumu( @@ -1182,7 +1182,7 @@ vuint64m4_t test_vdivu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vdivu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m8_t test_vdivu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t 
test_vdivu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vdivu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_mu( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_mu( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vdivu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_mu( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_mu( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vdivu_vv_u8mf2_mu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vdivu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_mu( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_mu( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vdivu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_mu( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vdivu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_mu( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vdivu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vdivu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_mu( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vdivu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vdivu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_mu( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vdivu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vdivu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_mu( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vdivu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_mu( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vdivu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vdivu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_mu( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vdivu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vdivu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_mu( @@ -1326,7 +1326,7 @@ 
vuint8m8_t test_vdivu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vdivu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_mu( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_mu( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vdivu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_mu( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_mu( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - 
return vdivu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_mu( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vdivu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_mu( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vdivu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vdivu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_mu( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vdivu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_mu( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vdivu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vdivu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_mu( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vdivu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, 
vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_mu( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vdivu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vdivu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_mu( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vdivu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vdivu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_mu( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vdivu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vdivu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_mu( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vdivu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_mu( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vdivu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_mu( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vdivu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_mu( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vdivu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vdivu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_mu( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vdivu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_mu( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vdivu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vdivu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vdivu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_mu( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vdivu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_mu( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vdivu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vdivu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_mu( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vdivu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vdivu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_mu( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vdivu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vdivu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vdivu_vx_u64m1_mu( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vdivu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_mu( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vdivu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vdivu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_mu( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vdivu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_mu( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vdivu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vdivu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_mu( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vdivu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, 
size_t vl) { - return vdivu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_mu( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vdivu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vdivu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vdivu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vdivu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vdivu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfabs.c index bca4f9012a75..28f360346728 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfabs.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfabs_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfabs_v_f16mf4_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f16mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfabs_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfabs_v_f16mf2_tu(maskedoff, op1, vl); + return 
__riscv_vfabs_v_f16mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfabs_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfabs_v_f16m1_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f16m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfabs_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfabs_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfabs_v_f16m2_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f16m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfabs_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfabs_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfabs_v_f16m4_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f16m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfabs_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfabs_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfabs_v_f16m8_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f16m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfabs_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfabs_v_f32mf2_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m1_tu( @@ -76,7 +76,7 @@ 
vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfabs_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfabs_v_f32m1_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f32m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfabs_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfabs_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfabs_v_f32m2_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfabs_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfabs_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfabs_v_f32m4_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfabs_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfabs_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfabs_v_f32m8_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfabs_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfabs_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfabs_v_f64m1_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfabs_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m2_t test_vfabs_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfabs_v_f64m2_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfabs_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfabs_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfabs_v_f64m4_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfabs_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfabs_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfabs_v_f64m8_tu(maskedoff, op1, vl); + return __riscv_vfabs_v_f64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_tum( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfabs_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfabs_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfabs_v_f16mf4_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_tum( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfabs_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfabs_v_f16mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m1_tum( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t 
test_vfabs_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfabs_v_f16m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m2_tum( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfabs_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfabs_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfabs_v_f16m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m4_tum( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfabs_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfabs_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfabs_v_f16m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m8_tum( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfabs_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfabs_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfabs_v_f16m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tum( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfabs_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfabs_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfabs_v_f32mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m1_tum( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, v // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfabs_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfabs_v_f32m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfabs_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfabs_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfabs_v_f32m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m4_tum( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfabs_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfabs_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfabs_v_f32m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m8_tum( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfabs_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfabs_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfabs_v_f32m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m1_tum( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfabs_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfabs_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfabs_v_f64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m2_tum( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfabs_v_f64m1_tum(vbool64_t 
mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfabs_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfabs_v_f64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m4_tum( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfabs_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfabs_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfabs_v_f64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m8_tum( @@ -274,7 +274,7 @@ vfloat64m4_t test_vfabs_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfabs_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfabs_v_f64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_tumu( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfabs_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfabs_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfabs_v_f16mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_tumu( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfabs_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfabs_v_f16mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m1_tumu( @@ 
-301,7 +301,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfabs_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfabs_v_f16m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m2_tumu( @@ -310,7 +310,7 @@ vfloat16m1_t test_vfabs_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfabs_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfabs_v_f16m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m4_tumu( @@ -319,7 +319,7 @@ vfloat16m2_t test_vfabs_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfabs_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfabs_v_f16m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m8_tumu( @@ -328,7 +328,7 @@ vfloat16m4_t test_vfabs_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfabs_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfabs_v_f16m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tumu( @@ -337,7 +337,7 @@ vfloat16m8_t test_vfabs_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfabs_v_f32mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32mf2_tumu(mask, 
maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m1_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfabs_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfabs_v_f32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m2_tumu( @@ -355,7 +355,7 @@ vfloat32m1_t test_vfabs_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfabs_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfabs_v_f32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m4_tumu( @@ -364,7 +364,7 @@ vfloat32m2_t test_vfabs_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfabs_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfabs_v_f32m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m8_tumu( @@ -373,7 +373,7 @@ vfloat32m4_t test_vfabs_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfabs_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfabs_v_f32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m1_tumu( @@ -382,7 +382,7 @@ vfloat32m8_t test_vfabs_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfabs_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfabs_v_f64m1_tumu(mask, 
maskedoff, op1, vl); + return __riscv_vfabs_v_f64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m2_tumu( @@ -391,7 +391,7 @@ vfloat64m1_t test_vfabs_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfabs_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfabs_v_f64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m4_tumu( @@ -400,7 +400,7 @@ vfloat64m2_t test_vfabs_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfabs_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfabs_v_f64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m8_tumu( @@ -409,7 +409,7 @@ vfloat64m4_t test_vfabs_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfabs_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfabs_v_f64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_mu( @@ -418,7 +418,7 @@ vfloat64m8_t test_vfabs_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfabs_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfabs_v_f16mf4_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_mu( @@ -427,7 +427,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfabs_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, 
vfloat16mf2_t op1, size_t vl) { - return vfabs_v_f16mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m1_mu( @@ -436,7 +436,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfabs_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfabs_v_f16m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m2_mu( @@ -445,7 +445,7 @@ vfloat16m1_t test_vfabs_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfabs_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfabs_v_f16m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m4_mu( @@ -454,7 +454,7 @@ vfloat16m2_t test_vfabs_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfabs_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfabs_v_f16m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f16m8_mu( @@ -463,7 +463,7 @@ vfloat16m4_t test_vfabs_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfabs_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfabs_v_f16m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f16m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_mu( @@ -472,7 +472,7 @@ vfloat16m8_t test_vfabs_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfabs_v_f32mf2_mu(vbool64_t mask, 
vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfabs_v_f32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m1_mu( @@ -481,7 +481,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfabs_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfabs_v_f32m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m2_mu( @@ -490,7 +490,7 @@ vfloat32m1_t test_vfabs_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfabs_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfabs_v_f32m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m4_mu( @@ -499,7 +499,7 @@ vfloat32m2_t test_vfabs_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfabs_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfabs_v_f32m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f32m8_mu( @@ -508,7 +508,7 @@ vfloat32m4_t test_vfabs_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfabs_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfabs_v_f32m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m1_mu( @@ -517,7 +517,7 @@ vfloat32m8_t test_vfabs_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vfabs_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfabs_v_f64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m2_mu( @@ -526,7 +526,7 @@ vfloat64m1_t test_vfabs_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfabs_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfabs_v_f64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m4_mu( @@ -535,7 +535,7 @@ vfloat64m2_t test_vfabs_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfabs_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfabs_v_f64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfabs_v_f64m8_mu( @@ -544,6 +544,6 @@ vfloat64m4_t test_vfabs_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfabs_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfabs_v_f64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfabs_v_f64m8_mu(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c index 984d8a6399b4..c56b4ff4b6ac 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return 
vfadd_vv_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfadd_vv_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfadd_vv_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, 
vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfadd_vv_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfadd_vv_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t 
test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfadd_vv_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfadd_vv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfadd_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfadd_vv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfadd_vf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfadd_vv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfadd_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfadd_vv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfadd_vf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, 
vfloat32m4_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfadd_vv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfadd_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfadd_vv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfadd_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfadd_vv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t 
test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfadd_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfadd_vv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfadd_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfadd_vv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfadd_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_tum( @@ 
-283,7 +283,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfadd_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfadd_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, 
vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfadd_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfadd_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfadd_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_tum( @@ -364,7 +364,7 @@ 
vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfadd_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfadd_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - 
return vfadd_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfadd_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfadd_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfadd_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfadd_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, 
vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfadd_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfadd_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfadd_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfadd_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfadd_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); + 
return __riscv_vfadd_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfadd_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfadd_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfadd_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfadd_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfadd_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfadd_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfadd_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfadd_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16mf4_tumu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfadd_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfadd_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfadd_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfadd_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfadd_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m8_tumu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfadd_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfadd_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfadd_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfadd_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfadd_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfadd_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfadd_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfadd_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m4_tumu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfadd_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfadd_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfadd_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfadd_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfadd_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfadd_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfadd_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfadd_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfadd_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m8_tumu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfadd_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfadd_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfadd_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfadd_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfadd_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfadd_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfadd_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfadd_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfadd_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, 
vfloat32mf2_t op2, size_t vl) { - return vfadd_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfadd_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfadd_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfadd_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfadd_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t 
test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfadd_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfadd_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfadd_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfadd_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfadd_vf_f32m8_mu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vfadd_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfadd_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfadd_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfadd_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfadd_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfadd_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfadd_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfadd_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfadd_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfadd_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfclass.c index 7d58bc9783ea..908ef3a1a731 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfclass.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfclass.c @@ -13,7 +13,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfclass_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfclass_v_u16mf4_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u16mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_tu( @@ -22,7 +22,7 @@ vuint16mf4_t test_vfclass_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfclass_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfclass_v_u16mf2_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u16mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m1_tu( @@ -31,7 +31,7 @@ vuint16mf2_t test_vfclass_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfclass_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfclass_v_u16m1_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u16m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m2_tu( @@ -40,7 +40,7 @@ vuint16m1_t test_vfclass_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfclass_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfclass_v_u16m2_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u16m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m4_tu( @@ -49,7 +49,7 @@ vuint16m2_t test_vfclass_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfclass_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfclass_v_u16m4_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u16m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m8_tu( @@ -58,7 +58,7 @@ vuint16m4_t test_vfclass_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfclass_v_u16m8_tu(vuint16m8_t 
maskedoff, vfloat16m8_t op1, size_t vl) { - return vfclass_v_u16m8_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u16m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tu( @@ -67,7 +67,7 @@ vuint16m8_t test_vfclass_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfclass_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfclass_v_u32mf2_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_tu( @@ -76,7 +76,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfclass_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfclass_v_u32m1_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u32m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_tu( @@ -85,7 +85,7 @@ vuint32m1_t test_vfclass_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfclass_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfclass_v_u32m2_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_tu( @@ -94,7 +94,7 @@ vuint32m2_t test_vfclass_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfclass_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfclass_v_u32m4_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_tu( @@ -103,7 +103,7 @@ vuint32m4_t test_vfclass_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfclass_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfclass_v_u32m8_tu(maskedoff, op1, vl); 
+ return __riscv_vfclass_v_u32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_tu( @@ -112,7 +112,7 @@ vuint32m8_t test_vfclass_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfclass_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfclass_v_u64m1_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_tu( @@ -121,7 +121,7 @@ vuint64m1_t test_vfclass_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfclass_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfclass_v_u64m2_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_tu( @@ -130,7 +130,7 @@ vuint64m2_t test_vfclass_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfclass_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfclass_v_u64m4_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_tu( @@ -139,7 +139,7 @@ vuint64m4_t test_vfclass_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfclass_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfclass_v_u64m8_tu(maskedoff, op1, vl); + return __riscv_vfclass_v_u64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_tum( @@ -148,7 +148,7 @@ vuint64m8_t test_vfclass_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfclass_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfclass_v_u16mf4_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16mf4_tum(mask, maskedoff, 
op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_tum( @@ -157,7 +157,7 @@ vuint16mf4_t test_vfclass_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfclass_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfclass_v_u16mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m1_tum( @@ -166,7 +166,7 @@ vuint16mf2_t test_vfclass_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfclass_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfclass_v_u16m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m2_tum( @@ -175,7 +175,7 @@ vuint16m1_t test_vfclass_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfclass_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfclass_v_u16m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m4_tum( @@ -184,7 +184,7 @@ vuint16m2_t test_vfclass_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfclass_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfclass_v_u16m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m8_tum( @@ -193,7 +193,7 @@ vuint16m4_t test_vfclass_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfclass_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfclass_v_u16m8_tum(mask, 
maskedoff, op1, vl); + return __riscv_vfclass_v_u16m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tum( @@ -202,7 +202,7 @@ vuint16m8_t test_vfclass_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfclass_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfclass_v_u32mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_tum( @@ -211,7 +211,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfclass_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfclass_v_u32m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_tum( @@ -220,7 +220,7 @@ vuint32m1_t test_vfclass_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfclass_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfclass_v_u32m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_tum( @@ -229,7 +229,7 @@ vuint32m2_t test_vfclass_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfclass_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfclass_v_u32m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_tum( @@ -238,7 +238,7 @@ vuint32m4_t test_vfclass_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfclass_v_u32m8_tum(vbool4_t mask, vuint32m8_t 
maskedoff, vfloat32m8_t op1, size_t vl) { - return vfclass_v_u32m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_tum( @@ -247,7 +247,7 @@ vuint32m8_t test_vfclass_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfclass_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfclass_v_u64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_tum( @@ -256,7 +256,7 @@ vuint64m1_t test_vfclass_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfclass_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfclass_v_u64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_tum( @@ -265,7 +265,7 @@ vuint64m2_t test_vfclass_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfclass_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfclass_v_u64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_tum( @@ -274,7 +274,7 @@ vuint64m4_t test_vfclass_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfclass_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfclass_v_u64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_tumu( @@ -283,7 +283,7 @@ vuint64m8_t test_vfclass_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16mf4_t test_vfclass_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfclass_v_u16mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_tumu( @@ -292,7 +292,7 @@ vuint16mf4_t test_vfclass_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfclass_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfclass_v_u16mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m1_tumu( @@ -301,7 +301,7 @@ vuint16mf2_t test_vfclass_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfclass_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfclass_v_u16m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m2_tumu( @@ -310,7 +310,7 @@ vuint16m1_t test_vfclass_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfclass_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfclass_v_u16m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m4_tumu( @@ -319,7 +319,7 @@ vuint16m2_t test_vfclass_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfclass_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfclass_v_u16m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m8_tumu( @@ -328,7 +328,7 @@ 
vuint16m4_t test_vfclass_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfclass_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfclass_v_u16m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_tumu( @@ -337,7 +337,7 @@ vuint16m8_t test_vfclass_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfclass_v_u32mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_tumu( @@ -346,7 +346,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfclass_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfclass_v_u32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_tumu( @@ -355,7 +355,7 @@ vuint32m1_t test_vfclass_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfclass_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfclass_v_u32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_tumu( @@ -364,7 +364,7 @@ vuint32m2_t test_vfclass_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfclass_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfclass_v_u32m4_tumu(mask, maskedoff, op1, vl); + return 
__riscv_vfclass_v_u32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_tumu( @@ -373,7 +373,7 @@ vuint32m4_t test_vfclass_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfclass_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfclass_v_u32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_tumu( @@ -382,7 +382,7 @@ vuint32m8_t test_vfclass_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfclass_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfclass_v_u64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_tumu( @@ -391,7 +391,7 @@ vuint64m1_t test_vfclass_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfclass_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfclass_v_u64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_tumu( @@ -400,7 +400,7 @@ vuint64m2_t test_vfclass_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfclass_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfclass_v_u64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_tumu( @@ -409,7 +409,7 @@ vuint64m4_t test_vfclass_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfclass_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, 
vfloat64m8_t op1, size_t vl) { - return vfclass_v_u64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_mu( @@ -418,7 +418,7 @@ vuint64m8_t test_vfclass_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfclass_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfclass_v_u16mf4_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_mu( @@ -427,7 +427,7 @@ vuint16mf4_t test_vfclass_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfclass_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfclass_v_u16mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m1_mu( @@ -436,7 +436,7 @@ vuint16mf2_t test_vfclass_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfclass_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfclass_v_u16m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m2_mu( @@ -445,7 +445,7 @@ vuint16m1_t test_vfclass_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfclass_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfclass_v_u16m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m4_mu( @@ -454,7 +454,7 @@ vuint16m2_t test_vfclass_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m4_t test_vfclass_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfclass_v_u16m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u16m8_mu( @@ -463,7 +463,7 @@ vuint16m4_t test_vfclass_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfclass_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfclass_v_u16m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u16m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_mu( @@ -472,7 +472,7 @@ vuint16m8_t test_vfclass_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfclass_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfclass_v_u32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_mu( @@ -481,7 +481,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfclass_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfclass_v_u32m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_mu( @@ -490,7 +490,7 @@ vuint32m1_t test_vfclass_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfclass_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfclass_v_u32m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_mu( @@ -499,7 +499,7 @@ vuint32m2_t test_vfclass_v_u32m2_mu(vbool16_t mask, 
vuint32m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfclass_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfclass_v_u32m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_mu( @@ -508,7 +508,7 @@ vuint32m4_t test_vfclass_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfclass_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfclass_v_u32m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_mu( @@ -517,7 +517,7 @@ vuint32m8_t test_vfclass_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfclass_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfclass_v_u64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_mu( @@ -526,7 +526,7 @@ vuint64m1_t test_vfclass_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfclass_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfclass_v_u64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_mu( @@ -535,7 +535,7 @@ vuint64m2_t test_vfclass_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfclass_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfclass_v_u64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_mu( @@ -544,6 +544,6 @@ 
vuint64m4_t test_vfclass_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfclass_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfclass_v_u64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfclass_v_u64m8_mu(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfcvt.c index bbaff6dacab7..e4ffc37ae7ad 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfcvt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_x_f_v_i16mf4_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tu( @@ -22,7 +22,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf4_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_tu( @@ -31,7 +31,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfcvt_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_x_f_v_i16mf2_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tu( @@ -40,7 +40,7 @@ vint16mf2_t test_vfcvt_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf2_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_tu( @@ -49,7 +49,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfcvt_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_x_f_v_i16m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tu( @@ -58,7 +58,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_tu( @@ -67,7 +67,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_x_f_v_i16m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tu( @@ -76,7 +76,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_tu( @@ -85,7 +85,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vfcvt_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_x_f_v_i16m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tu( @@ -94,7 +94,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_tu( @@ -103,7 +103,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_x_f_v_i16m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tu( @@ -112,7 +112,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_tu( @@ -121,7 +121,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf4_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tu( @@ -130,7 +130,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf4_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_tu( @@ -139,7 +139,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf2_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tu( @@ -148,7 +148,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf2_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_tu( @@ -157,7 +157,7 @@ vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_xu_f_v_u16m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tu( @@ -166,7 +166,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_tu( @@ -175,7 +175,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t s // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_xu_f_v_u16m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tu( @@ -184,7 +184,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_tu( @@ -193,7 +193,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_xu_f_v_u16m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tu( @@ -202,7 +202,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_tu( @@ -211,7 +211,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_xu_f_v_u16m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tu( @@ -220,7 +220,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src, 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_tu( @@ -229,7 +229,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tu(vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return vfcvt_f_x_v_f16mf4_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_tu( @@ -238,7 +238,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tu(vfloat16mf4_t maskedoff, vint16mf4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tu(vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return vfcvt_f_x_v_f16mf2_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_tu( @@ -247,7 +247,7 @@ vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tu(vfloat16mf2_t maskedoff, vint16mf2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_x_v_f16m1_tu(vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return vfcvt_f_x_v_f16m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_tu( @@ -256,7 +256,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1_tu(vfloat16m1_t maskedoff, vint16m1_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfcvt_f_x_v_f16m2_tu(vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return vfcvt_f_x_v_f16m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_tu( @@ -265,7 +265,7 @@ vfloat16m2_t test_vfcvt_f_x_v_f16m2_tu(vfloat16m2_t maskedoff, vint16m2_t src, s // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_x_v_f16m4_tu(vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return vfcvt_f_x_v_f16m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_tu( @@ -274,7 +274,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4_tu(vfloat16m4_t maskedoff, vint16m4_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfcvt_f_x_v_f16m8_tu(vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return vfcvt_f_x_v_f16m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_tu( @@ -283,7 +283,7 @@ vfloat16m8_t test_vfcvt_f_x_v_f16m8_tu(vfloat16m8_t maskedoff, vint16m8_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf4_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_tu( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t maskedoff, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf2_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_tu( @@ -301,7 +301,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t maskedoff, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tu(vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return vfcvt_f_xu_v_f16m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_tu( @@ -310,7 +310,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tu(vfloat16m1_t maskedoff, vuint16m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tu(vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return vfcvt_f_xu_v_f16m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_tu( @@ -319,7 +319,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tu(vfloat16m2_t maskedoff, vuint16m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tu(vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return vfcvt_f_xu_v_f16m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_tu( @@ -328,7 +328,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tu(vfloat16m4_t maskedoff, vuint16m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tu(vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return vfcvt_f_xu_v_f16m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tu( @@ -337,7 +337,7 @@ vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tu(vfloat16m8_t maskedoff, vuint16m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_x_f_v_i32mf2_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tu( @@ -346,7 +346,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32mf2_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_tu( @@ -355,7 +355,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vfcvt_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_x_f_v_i32m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tu( @@ -364,7 +364,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_tu( @@ -373,7 +373,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_x_f_v_i32m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tu( @@ -382,7 +382,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_tu( @@ -391,7 +391,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_x_f_v_i32m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tu( @@ -400,7 +400,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_tu( @@ -409,7 +409,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_x_f_v_i32m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tu( @@ -418,7 +418,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tu( @@ -427,7 +427,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u32mf2_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tu( @@ -436,7 +436,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32mf2_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_tu( @@ -445,7 +445,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m1_t test_vfcvt_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_xu_f_v_u32m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tu( @@ -454,7 +454,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_tu( @@ -463,7 +463,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_xu_f_v_u32m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tu( @@ -472,7 +472,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_tu( @@ -481,7 +481,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_xu_f_v_u32m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tu( @@ -490,7 +490,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_tu( @@ -499,7 +499,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_xu_f_v_u32m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tu( @@ -508,7 +508,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tu( @@ -517,7 +517,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tu(vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return vfcvt_f_x_v_f32mf2_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_tu( @@ -526,7 +526,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tu(vfloat32mf2_t maskedoff, vint32mf2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_x_v_f32m1_tu(vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return vfcvt_f_x_v_f32m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_tu( @@ -535,7 +535,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1_tu(vfloat32m1_t maskedoff, vint32m1_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m2_t test_vfcvt_f_x_v_f32m2_tu(vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return vfcvt_f_x_v_f32m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_tu( @@ -544,7 +544,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2_tu(vfloat32m2_t maskedoff, vint32m2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_x_v_f32m4_tu(vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return vfcvt_f_x_v_f32m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_tu( @@ -553,7 +553,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4_tu(vfloat32m4_t maskedoff, vint32m4_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_x_v_f32m8_tu(vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return vfcvt_f_x_v_f32m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tu( @@ -562,7 +562,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8_tu(vfloat32m8_t maskedoff, vint32m8_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f32mf2_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_tu( @@ -571,7 +571,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t maskedoff, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tu(vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return vfcvt_f_xu_v_f32m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_tu( @@ -580,7 +580,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tu(vfloat32m1_t maskedoff, vuint32m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t 
test_vfcvt_f_xu_v_f32m2_tu(vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return vfcvt_f_xu_v_f32m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_tu( @@ -589,7 +589,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tu(vfloat32m2_t maskedoff, vuint32m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tu(vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return vfcvt_f_xu_v_f32m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_tu( @@ -598,7 +598,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tu(vfloat32m4_t maskedoff, vuint32m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tu(vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return vfcvt_f_xu_v_f32m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_tu( @@ -607,7 +607,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tu(vfloat32m8_t maskedoff, vuint32m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_x_f_v_i64m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tu( @@ -616,7 +616,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_tu( @@ -625,7 +625,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vfcvt_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_x_f_v_i64m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tu( @@ -634,7 +634,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_tu( @@ -643,7 +643,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_x_f_v_i64m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tu( @@ -652,7 +652,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_tu( @@ -661,7 +661,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_x_f_v_i64m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tu( @@ -670,7 +670,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_tu( @@ -679,7 +679,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_xu_f_v_u64m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tu( @@ -688,7 +688,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_tu( @@ -697,7 +697,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_xu_f_v_u64m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tu( @@ -706,7 +706,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_tu( @@ -715,7 +715,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m4_t test_vfcvt_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_xu_f_v_u64m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tu( @@ -724,7 +724,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_tu( @@ -733,7 +733,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_xu_f_v_u64m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tu( @@ -742,7 +742,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_tu( @@ -751,7 +751,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_x_v_f64m1_tu(vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return vfcvt_f_x_v_f64m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_tu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1_tu(vfloat64m1_t maskedoff, vint64m1_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m2_t test_vfcvt_f_x_v_f64m2_tu(vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return vfcvt_f_x_v_f64m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_tu( @@ -769,7 +769,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2_tu(vfloat64m2_t maskedoff, vint64m2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_x_v_f64m4_tu(vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return vfcvt_f_x_v_f64m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_tu( @@ -778,7 +778,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4_tu(vfloat64m4_t maskedoff, vint64m4_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_x_v_f64m8_tu(vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return vfcvt_f_x_v_f64m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_tu( @@ -787,7 +787,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8_tu(vfloat64m8_t maskedoff, vint64m8_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tu(vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return vfcvt_f_xu_v_f64m1_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_tu( @@ -796,7 +796,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tu(vfloat64m1_t maskedoff, vuint64m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tu(vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return vfcvt_f_xu_v_f64m2_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_tu( @@ -805,7 +805,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tu(vfloat64m2_t maskedoff, vuint64m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t 
test_vfcvt_f_xu_v_f64m4_tu(vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return vfcvt_f_xu_v_f64m4_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_tu( @@ -814,7 +814,7 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tu(vfloat64m4_t maskedoff, vuint64m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tu(vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return vfcvt_f_xu_v_f64m8_tu(maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_tum( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tu(vfloat64m8_t maskedoff, vuint64m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_x_f_v_i16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tum( @@ -832,7 +832,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_tum( @@ -841,7 +841,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfcvt_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_x_f_v_i16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tum( @@ -850,7 +850,7 @@ vint16mf2_t 
test_vfcvt_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_tum( @@ -859,7 +859,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfcvt_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_x_f_v_i16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tum( @@ -868,7 +868,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_tum( @@ -877,7 +877,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_x_f_v_i16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tum( @@ -886,7 +886,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m2_tum(mask, maskedoff, src, vl); + 
return __riscv_vfcvt_rtz_x_f_v_i16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_tum( @@ -895,7 +895,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfcvt_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_x_f_v_i16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tum( @@ -904,7 +904,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_tum( @@ -913,7 +913,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_x_f_v_i16m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tum( @@ -922,7 +922,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_tum( @@ -931,7 +931,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vfcvt_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tum( @@ -940,7 +940,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_tum( @@ -949,7 +949,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tum( @@ -958,7 +958,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_tum( @@ -967,7 +967,7 @@ vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_xu_f_v_u16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m1_tum(mask, maskedoff, src, vl); } // 
CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tum( @@ -976,7 +976,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_tum( @@ -985,7 +985,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_xu_f_v_u16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tum( @@ -994,7 +994,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_tum( @@ -1003,7 +1003,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_xu_f_v_u16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tum( @@ -1012,7 +1012,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t 
maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_tum( @@ -1021,7 +1021,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_xu_f_v_u16m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tum( @@ -1030,7 +1030,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_tum( @@ -1039,7 +1039,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return vfcvt_f_x_v_f16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_tum( @@ -1048,7 +1048,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return vfcvt_f_x_v_f16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_tum( @@ -1057,7 +1057,7 @@ vfloat16mf2_t 
test_vfcvt_f_x_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_x_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return vfcvt_f_x_v_f16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_tum( @@ -1066,7 +1066,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfcvt_f_x_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return vfcvt_f_x_v_f16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_tum( @@ -1075,7 +1075,7 @@ vfloat16m2_t test_vfcvt_f_x_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_x_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return vfcvt_f_x_v_f16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_tum( @@ -1084,7 +1084,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfcvt_f_x_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return vfcvt_f_x_v_f16m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_tum( @@ -1093,7 +1093,7 @@ vfloat16m8_t test_vfcvt_f_x_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf4_tum(mask, maskedoff, src, vl); + return 
__riscv_vfcvt_f_xu_v_f16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_tum( @@ -1102,7 +1102,7 @@ vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_tum( @@ -1111,7 +1111,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return vfcvt_f_xu_v_f16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_tum( @@ -1120,7 +1120,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return vfcvt_f_xu_v_f16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_tum( @@ -1129,7 +1129,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return vfcvt_f_xu_v_f16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_tum( @@ -1138,7 +1138,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t 
test_vfcvt_f_xu_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return vfcvt_f_xu_v_f16m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tum( @@ -1147,7 +1147,7 @@ vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tum( @@ -1156,7 +1156,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_tum( @@ -1165,7 +1165,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_x_f_v_i32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tum( @@ -1174,7 +1174,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfcvt_x_f_v_i32m2_tum( @@ -1183,7 +1183,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_x_f_v_i32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tum( @@ -1192,7 +1192,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_tum( @@ -1201,7 +1201,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_x_f_v_i32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tum( @@ -1210,7 +1210,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_tum( @@ -1219,7 +1219,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return 
vfcvt_x_f_v_i32m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tum( @@ -1228,7 +1228,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tum( @@ -1237,7 +1237,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tum( @@ -1246,7 +1246,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_tum( @@ -1255,7 +1255,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tum( @@ -1264,7 +1264,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1_tum(vbool32_t 
mask, vuint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_tum( @@ -1273,7 +1273,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tum( @@ -1282,7 +1282,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_tum( @@ -1291,7 +1291,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tum( @@ -1300,7 +1300,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); + return 
__riscv_vfcvt_rtz_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_tum( @@ -1309,7 +1309,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tum( @@ -1318,7 +1318,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tum( @@ -1327,7 +1327,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return vfcvt_f_x_v_f32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_tum( @@ -1336,7 +1336,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_x_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return vfcvt_f_x_v_f32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_tum( @@ -1345,7 +1345,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t 
test_vfcvt_f_x_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return vfcvt_f_x_v_f32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_tum( @@ -1354,7 +1354,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_x_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return vfcvt_f_x_v_f32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_tum( @@ -1363,7 +1363,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_x_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return vfcvt_f_x_v_f32m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tum( @@ -1372,7 +1372,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_tum( @@ -1381,7 +1381,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return vfcvt_f_xu_v_f32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_tum( @@ -1390,7 
+1390,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return vfcvt_f_xu_v_f32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_tum( @@ -1399,7 +1399,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return vfcvt_f_xu_v_f32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_tum( @@ -1408,7 +1408,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return vfcvt_f_xu_v_f32m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_tum( @@ -1417,7 +1417,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_x_f_v_i64m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tum( @@ -1426,7 +1426,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m1_tum(mask, maskedoff, 
src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_tum( @@ -1435,7 +1435,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_x_f_v_i64m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tum( @@ -1444,7 +1444,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_tum( @@ -1453,7 +1453,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_x_f_v_i64m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tum( @@ -1462,7 +1462,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_tum( @@ -1471,7 +1471,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vfcvt_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_x_f_v_i64m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tum( @@ -1480,7 +1480,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_tum( @@ -1489,7 +1489,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tum( @@ -1498,7 +1498,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_tum( @@ -1507,7 +1507,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfcvt_rtz_xu_f_v_u64m2_tum( @@ -1516,7 +1516,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_tum( @@ -1525,7 +1525,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tum( @@ -1534,7 +1534,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_tum( @@ -1543,7 +1543,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tum( @@ -1552,7 +1552,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, 
vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_tum( @@ -1561,7 +1561,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_x_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return vfcvt_f_x_v_f64m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_tum( @@ -1570,7 +1570,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_x_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return vfcvt_f_x_v_f64m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_tum( @@ -1579,7 +1579,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_x_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return vfcvt_f_x_v_f64m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_tum( @@ -1588,7 +1588,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_x_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return vfcvt_f_x_v_f64m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_tum( @@ -1597,7 +1597,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8_tum(vbool8_t mask, vfloat64m8_t 
maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return vfcvt_f_xu_v_f64m1_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_tum( @@ -1606,7 +1606,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return vfcvt_f_xu_v_f64m2_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_tum( @@ -1615,7 +1615,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return vfcvt_f_xu_v_f64m4_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_tum( @@ -1624,7 +1624,7 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return vfcvt_f_xu_v_f64m8_tum(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_tumu( @@ -1633,7 +1633,7 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_x_f_v_i16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16mf4_tumu(mask, maskedoff, src, 
vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tumu( @@ -1642,7 +1642,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_tumu( @@ -1651,7 +1651,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfcvt_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_x_f_v_i16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tumu( @@ -1660,7 +1660,7 @@ vint16mf2_t test_vfcvt_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_tumu( @@ -1669,7 +1669,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfcvt_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_x_f_v_i16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tumu( @@ -1678,7 +1678,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_tumu( @@ -1687,7 +1687,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_x_f_v_i16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tumu( @@ -1696,7 +1696,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_tumu( @@ -1705,7 +1705,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfcvt_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_x_f_v_i16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tumu( @@ -1714,7 +1714,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfcvt_x_f_v_i16m8_tumu( @@ -1723,7 +1723,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_x_f_v_i16m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tumu( @@ -1732,7 +1732,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_tumu( @@ -1741,7 +1741,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tumu( @@ -1750,7 +1750,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_tumu( @@ -1759,7 +1759,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tumu(vbool32_t mask, 
vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tumu( @@ -1768,7 +1768,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_tumu( @@ -1777,7 +1777,7 @@ vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_xu_f_v_u16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tumu( @@ -1786,7 +1786,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_tumu( @@ -1795,7 +1795,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_xu_f_v_u16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfcvt_rtz_xu_f_v_u16m2_tumu( @@ -1804,7 +1804,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_tumu( @@ -1813,7 +1813,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_xu_f_v_u16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tumu( @@ -1822,7 +1822,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_tumu( @@ -1831,7 +1831,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_xu_f_v_u16m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tumu( @@ -1840,7 +1840,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t mask, 
vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_tumu( @@ -1849,7 +1849,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return vfcvt_f_x_v_f16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_tumu( @@ -1858,7 +1858,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return vfcvt_f_x_v_f16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_tumu( @@ -1867,7 +1867,7 @@ vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_x_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return vfcvt_f_x_v_f16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_tumu( @@ -1876,7 +1876,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfcvt_f_x_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return vfcvt_f_x_v_f16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_tumu( @@ -1885,7 +1885,7 @@ 
vfloat16m2_t test_vfcvt_f_x_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_x_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return vfcvt_f_x_v_f16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_tumu( @@ -1894,7 +1894,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfcvt_f_x_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return vfcvt_f_x_v_f16m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_tumu( @@ -1903,7 +1903,7 @@ vfloat16m8_t test_vfcvt_f_x_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_tumu( @@ -1912,7 +1912,7 @@ vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_tumu( @@ -1921,7 +1921,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return vfcvt_f_xu_v_f16m1_tumu(mask, 
maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_tumu( @@ -1930,7 +1930,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return vfcvt_f_xu_v_f16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_tumu( @@ -1939,7 +1939,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return vfcvt_f_xu_v_f16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_tumu( @@ -1948,7 +1948,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return vfcvt_f_xu_v_f16m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_tumu( @@ -1957,7 +1957,7 @@ vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tumu( @@ -1966,7 +1966,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_tumu( @@ -1975,7 +1975,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tumu( @@ -1984,7 +1984,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_tumu( @@ -1993,7 +1993,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tumu( @@ -2002,7 +2002,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m2_tumu(mask, maskedoff, src, 
vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_tumu( @@ -2011,7 +2011,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tumu( @@ -2020,7 +2020,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_tumu( @@ -2029,7 +2029,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tumu( @@ -2038,7 +2038,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tumu( @@ -2047,7 +2047,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, 
vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tumu( @@ -2056,7 +2056,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_tumu( @@ -2065,7 +2065,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tumu( @@ -2074,7 +2074,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_tumu( @@ -2083,7 +2083,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfcvt_rtz_xu_f_v_u32m2_tumu( @@ -2092,7 +2092,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_tumu( @@ -2101,7 +2101,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tumu( @@ -2110,7 +2110,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_tumu( @@ -2119,7 +2119,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tumu( @@ -2128,7 +2128,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, 
vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tumu( @@ -2137,7 +2137,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return vfcvt_f_x_v_f32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_tumu( @@ -2146,7 +2146,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_x_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return vfcvt_f_x_v_f32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_tumu( @@ -2155,7 +2155,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_x_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return vfcvt_f_x_v_f32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_tumu( @@ -2164,7 +2164,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_x_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return vfcvt_f_x_v_f32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_tumu( @@ -2173,7 +2173,7 @@ vfloat32m4_t 
test_vfcvt_f_x_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_x_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return vfcvt_f_x_v_f32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_tumu( @@ -2182,7 +2182,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_tumu( @@ -2191,7 +2191,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return vfcvt_f_xu_v_f32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_tumu( @@ -2200,7 +2200,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return vfcvt_f_xu_v_f32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_tumu( @@ -2209,7 +2209,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return vfcvt_f_xu_v_f32m4_tumu(mask, maskedoff, 
src, vl); + return __riscv_vfcvt_f_xu_v_f32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_tumu( @@ -2218,7 +2218,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return vfcvt_f_xu_v_f32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_tumu( @@ -2227,7 +2227,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tumu( @@ -2236,7 +2236,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_tumu( @@ -2245,7 +2245,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tumu( @@ -2254,7 +2254,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_tumu( @@ -2263,7 +2263,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tumu( @@ -2272,7 +2272,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_tumu( @@ -2281,7 +2281,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tumu( @@ -2290,7 +2290,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); } // 
CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_tumu( @@ -2299,7 +2299,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tumu( @@ -2308,7 +2308,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_tumu( @@ -2317,7 +2317,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tumu( @@ -2326,7 +2326,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_tumu( @@ -2335,7 +2335,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_xu_f_v_u64m4_tumu(vbool16_t 
mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tumu( @@ -2344,7 +2344,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_tumu( @@ -2353,7 +2353,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tumu( @@ -2362,7 +2362,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_tumu( @@ -2371,7 +2371,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return vfcvt_f_x_v_f64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_tumu( @@ 
-2380,7 +2380,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_x_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return vfcvt_f_x_v_f64m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_tumu( @@ -2389,7 +2389,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_x_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return vfcvt_f_x_v_f64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_tumu( @@ -2398,7 +2398,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_x_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return vfcvt_f_x_v_f64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_tumu( @@ -2407,7 +2407,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return vfcvt_f_xu_v_f64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_tumu( @@ -2416,7 +2416,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return 
vfcvt_f_xu_v_f64m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_tumu( @@ -2425,7 +2425,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return vfcvt_f_xu_v_f64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_tumu( @@ -2434,7 +2434,7 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return vfcvt_f_xu_v_f64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_mu( @@ -2443,7 +2443,7 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_x_f_v_i16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_mu( @@ -2452,7 +2452,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_mu( @@ -2461,7 +2461,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t 
maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfcvt_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_x_f_v_i16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_mu( @@ -2470,7 +2470,7 @@ vint16mf2_t test_vfcvt_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_mu( @@ -2479,7 +2479,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfcvt_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_x_f_v_i16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_mu( @@ -2488,7 +2488,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_mu( @@ -2497,7 +2497,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_x_f_v_i16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m2_mu(mask, maskedoff, src, vl); } // 
CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_mu( @@ -2506,7 +2506,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_mu( @@ -2515,7 +2515,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfcvt_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_x_f_v_i16m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_mu( @@ -2524,7 +2524,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_mu( @@ -2533,7 +2533,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_x_f_v_i16m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i16m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_mu( @@ -2542,7 +2542,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - 
return vfcvt_rtz_x_f_v_i16m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i16m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_mu( @@ -2551,7 +2551,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_mu( @@ -2560,7 +2560,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_mu( @@ -2569,7 +2569,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_mu( @@ -2578,7 +2578,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_mu( @@ -2587,7 +2587,7 @@ vuint16mf2_t 
test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_xu_f_v_u16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_mu( @@ -2596,7 +2596,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_mu( @@ -2605,7 +2605,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_xu_f_v_u16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_mu( @@ -2614,7 +2614,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_mu( @@ -2623,7 +2623,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_xu_f_v_u16m4_mu(mask, maskedoff, src, vl); + 
return __riscv_vfcvt_xu_f_v_u16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_mu( @@ -2632,7 +2632,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_mu( @@ -2641,7 +2641,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_xu_f_v_u16m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u16m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_mu( @@ -2650,7 +2650,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u16m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_mu( @@ -2659,7 +2659,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return vfcvt_f_x_v_f16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_mu( @@ -2668,7 +2668,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vfcvt_f_x_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return vfcvt_f_x_v_f16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_mu( @@ -2677,7 +2677,7 @@ vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_x_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return vfcvt_f_x_v_f16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_mu( @@ -2686,7 +2686,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfcvt_f_x_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return vfcvt_f_x_v_f16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_mu( @@ -2695,7 +2695,7 @@ vfloat16m2_t test_vfcvt_f_x_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_x_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return vfcvt_f_x_v_f16m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_mu( @@ -2704,7 +2704,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfcvt_f_x_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return vfcvt_f_x_v_f16m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f16m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_mu( @@ -2713,7 +2713,7 @@ vfloat16m8_t 
test_vfcvt_f_x_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_mu( @@ -2722,7 +2722,7 @@ vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_mu( @@ -2731,7 +2731,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfcvt_f_xu_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return vfcvt_f_xu_v_f16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_mu( @@ -2740,7 +2740,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfcvt_f_xu_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return vfcvt_f_xu_v_f16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_mu( @@ -2749,7 +2749,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfcvt_f_xu_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return vfcvt_f_xu_v_f16m4_mu(mask, maskedoff, src, vl); + return 
__riscv_vfcvt_f_xu_v_f16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_mu( @@ -2758,7 +2758,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfcvt_f_xu_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return vfcvt_f_xu_v_f16m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f16m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_mu( @@ -2767,7 +2767,7 @@ vfloat16m8_t test_vfcvt_f_xu_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_mu( @@ -2776,7 +2776,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_mu( @@ -2785,7 +2785,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_x_f_v_i32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_mu( @@ -2794,7 +2794,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_mu( @@ -2803,7 +2803,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_x_f_v_i32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_mu( @@ -2812,7 +2812,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_mu( @@ -2821,7 +2821,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_x_f_v_i32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_mu( @@ -2830,7 +2830,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_mu( @@ -2839,7 
+2839,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_x_f_v_i32m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_mu( @@ -2848,7 +2848,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_mu( @@ -2857,7 +2857,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_mu( @@ -2866,7 +2866,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_mu( @@ -2875,7 +2875,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return 
vfcvt_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_mu( @@ -2884,7 +2884,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_mu( @@ -2893,7 +2893,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_mu( @@ -2902,7 +2902,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_mu( @@ -2911,7 +2911,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_mu( @@ -2920,7 +2920,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, 
vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_mu( @@ -2929,7 +2929,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_mu( @@ -2938,7 +2938,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_mu( @@ -2947,7 +2947,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return vfcvt_f_x_v_f32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_mu( @@ -2956,7 +2956,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_x_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return vfcvt_f_x_v_f32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m1_mu(mask, maskedoff, src, vl); } // 
CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_mu( @@ -2965,7 +2965,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_x_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return vfcvt_f_x_v_f32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_mu( @@ -2974,7 +2974,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_x_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return vfcvt_f_x_v_f32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_mu( @@ -2983,7 +2983,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_x_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return vfcvt_f_x_v_f32m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_mu( @@ -2992,7 +2992,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_mu( @@ -3001,7 +3001,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_xu_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return 
vfcvt_f_xu_v_f32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_mu( @@ -3010,7 +3010,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_xu_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return vfcvt_f_xu_v_f32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_mu( @@ -3019,7 +3019,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_xu_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return vfcvt_f_xu_v_f32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_mu( @@ -3028,7 +3028,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_xu_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return vfcvt_f_xu_v_f32m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_mu( @@ -3037,7 +3037,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_x_f_v_i64m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_mu( @@ -3046,7 +3046,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_mu( @@ -3055,7 +3055,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_x_f_v_i64m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_mu( @@ -3064,7 +3064,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_mu( @@ -3073,7 +3073,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_x_f_v_i64m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_mu( @@ -3082,7 +3082,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_mu( @@ 
-3091,7 +3091,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_x_f_v_i64m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_x_f_v_i64m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_mu( @@ -3100,7 +3100,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_x_f_v_i64m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_mu( @@ -3109,7 +3109,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_mu( @@ -3118,7 +3118,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_mu( @@ -3127,7 +3127,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_xu_f_v_u64m2_mu(mask, 
maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_mu( @@ -3136,7 +3136,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_mu( @@ -3145,7 +3145,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_mu( @@ -3154,7 +3154,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_mu( @@ -3163,7 +3163,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_mu( @@ -3172,7 +3172,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_rtz_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_mu( @@ -3181,7 +3181,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_x_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return vfcvt_f_x_v_f64m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_mu( @@ -3190,7 +3190,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_x_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return vfcvt_f_x_v_f64m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_mu( @@ -3199,7 +3199,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_x_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return vfcvt_f_x_v_f64m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_mu( @@ -3208,7 +3208,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_x_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return vfcvt_f_x_v_f64m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_x_v_f64m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_mu( @@ -3217,7 
+3217,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_xu_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return vfcvt_f_xu_v_f64m1_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_mu( @@ -3226,7 +3226,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_xu_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return vfcvt_f_xu_v_f64m2_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_mu( @@ -3235,7 +3235,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_xu_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return vfcvt_f_xu_v_f64m4_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_mu( @@ -3244,6 +3244,6 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_xu_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return vfcvt_f_xu_v_f64m8_mu(mask, maskedoff, src, vl); + return __riscv_vfcvt_f_xu_v_f64m8_mu(mask, maskedoff, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfdiv.c index 7028a15b4b44..abc5fe752ca1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfdiv.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfdiv.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfdiv_vv_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfdiv_vv_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfdiv_vv_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m1_tu(maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfdiv_vv_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfdiv_vv_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m4_tu(maskedoff, op1, op2, vl); + return 
__riscv_vfdiv_vf_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfdiv_vv_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfdiv_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfdiv_vv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return 
vfdiv_vv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfdiv_vv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfdiv_vv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, 
size_t vl) { - return vfdiv_vf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfdiv_vv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfdiv_vv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vv_f64m2_tu(vfloat64m2_t maskedoff, 
vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfdiv_vv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfdiv_vv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfdiv_vv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vfdiv_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfdiv_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfdiv_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfdiv_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfdiv_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfdiv_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t 
maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfdiv_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfdiv_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfdiv_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfdiv_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tum( @@ 
-400,7 +400,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfdiv_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfdiv_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t 
vl) { - return vfdiv_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfdiv_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfdiv_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8_tum(vbool4_t mask, 
vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfdiv_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfdiv_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfdiv_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); 
+ return __riscv_vfdiv_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfdiv_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfdiv_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfdiv_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfdiv_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfdiv_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfdiv_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfdiv_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfdiv_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m8_t test_vfdiv_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfdiv_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfdiv_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfdiv_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfdiv_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfdiv_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfdiv_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfdiv_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m4_t test_vfdiv_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfdiv_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfdiv_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m1_tumu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfdiv_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfdiv_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vfdiv_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfdiv_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfdiv_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfdiv_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfdiv_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfdiv_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfdiv_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfdiv_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16mf2_mu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfdiv_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfdiv_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfdiv_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfdiv_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfdiv_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfdiv_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfdiv_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfdiv_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfdiv_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfdiv_vf_f16m2_mu(vbool8_t mask, 
vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfdiv_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfdiv_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfdiv_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfdiv_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfdiv_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfdiv_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfdiv_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfdiv_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfdiv_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_mu( @@ -931,7 +931,7 @@ 
vfloat16m8_t test_vfdiv_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfdiv_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfdiv_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfdiv_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return 
vfdiv_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfdiv_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfdiv_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfdiv_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfdiv_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfdiv_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m2_mu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfdiv_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfdiv_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfdiv_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfdiv_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfdiv_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc.c index f09b7148a153..f9e88ee2896b 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmacc.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmacc_vv_f16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmacc_vf_f16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmacc_vv_f16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmacc_vf_f16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmacc_vv_f16m1_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m1_tu(vd, 
vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmacc_vf_f16m1_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmacc_vv_f16m2_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmacc_vf_f16m2_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmacc_vv_f16m4_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmacc_vf_f16m4_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_tu( @@ 
-103,7 +103,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmacc_vv_f16m8_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmacc_vf_f16m8_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmacc_vf_f32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmacc_vv_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t 
test_vfmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmacc_vf_f32m1_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmacc_vv_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmacc_vf_f32m2_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmacc_vv_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmacc_vf_f32m4_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t 
vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmacc_vv_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmacc_vf_f32m8_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmacc_vv_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmacc_vf_f64m1_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmacc_vv_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmacc_vf_f64m2_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmacc_vv_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmacc_vf_f64m4_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmacc_vv_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmacc_vf_f64m8_tu(vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, 
vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmacc_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmacc_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmacc_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmacc_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmacc_vv_f16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfmacc_vv_f16m1_tum(vbool16_t mask, 
vfloat16m1_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmacc_vf_f16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmacc_vv_f16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmacc_vf_f16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmacc_vv_f16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmacc_vf_f16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfmacc_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmacc_vv_f16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmacc_vf_f16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmacc_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmacc_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmacc_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return 
vfmacc_vv_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmacc_vf_f32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmacc_vv_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmacc_vf_f32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmacc_vv_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t 
test_vfmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmacc_vf_f32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmacc_vf_f32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmacc_vv_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmacc_vf_f64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t 
test_vfmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmacc_vv_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmacc_vf_f64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmacc_vv_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmacc_vf_f64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmacc_vv_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m8_tum(mask, 
vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmacc_vf_f64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmacc_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmacc_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmacc_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, 
_Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmacc_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmacc_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfmacc_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmacc_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmacc_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmacc_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2_tumu(vbool8_t mask, 
vfloat16m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmacc_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmacc_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmacc_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmacc_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfmacc_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmacc_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32mf2_tumu(mask, vd, vs1, vs2, 
vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmacc_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmacc_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmacc_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmacc_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, 
size_t vl) { - return vfmacc_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmacc_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmacc_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmacc_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmacc_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmacc_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmacc_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmacc_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_tumu( 
@@ -796,7 +796,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmacc_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmacc_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmacc_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmacc_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmacc_vf_f16mf4_mu(mask, vd, rs1, vs2, 
vl); + return __riscv_vfmacc_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmacc_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmacc_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmacc_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmacc_vv_f16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfmacc_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmacc_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmacc_vf_f16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vv_f16m2_mu(vbool8_t mask, 
vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmacc_vv_f16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmacc_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmacc_vf_f16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmacc_vv_f16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmacc_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmacc_vf_f16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmacc_vv_f16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmacc_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmacc_vf_f16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfmacc_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmacc_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmacc_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmacc_vv_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmacc_vf_f32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_mu( @@ -967,7 
+967,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmacc_vv_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmacc_vf_f32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmacc_vv_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmacc_vf_f32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl); + return 
__riscv_vfmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmacc_vf_f32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmacc_vv_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmacc_vf_f64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmacc_vv_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, 
vfloat64m2_t vs2, size_t vl) { - return vfmacc_vf_f64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmacc_vv_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmacc_vf_f64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmacc_vf_f64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmacc_vf_f64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd.c index 40fa6e56cc21..a3194992b866 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmadd.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmadd_vv_f16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmadd_vf_f16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmadd_vv_f16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmadd_vf_f16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1_tu(vfloat16m1_t vd, 
vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmadd_vv_f16m1_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmadd_vf_f16m1_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmadd_vv_f16m2_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmadd_vf_f16m2_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmadd_vv_f16m4_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return 
vfmadd_vf_f16m4_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmadd_vv_f16m8_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmadd_vf_f16m8_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmadd_vv_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmadd_vf_f32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmadd_vv_f32m1_tu(vd, vs1, vs2, vl); + 
return __riscv_vfmadd_vv_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmadd_vf_f32m1_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmadd_vv_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmadd_vf_f32m2_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmadd_vv_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmadd_vf_f32m4_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m4_tu(vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmadd_vv_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmadd_vf_f32m8_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmadd_vv_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmadd_vf_f64m1_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmadd_vv_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_tu( @@ -238,7 
+238,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmadd_vf_f64m2_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmadd_vv_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmadd_vf_f64m4_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmadd_vv_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmadd_vf_f64m8_tu(vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfmadd_vf_f64m8_tu(vfloat64m8_t 
vd, double rs1, vfloat64m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmadd_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmadd_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmadd_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmadd_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmadd_vv_f16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m1_tum(mask, 
vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmadd_vf_f16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmadd_vv_f16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmadd_vf_f16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmadd_vv_f16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) 
{ - return vfmadd_vf_f16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmadd_vv_f16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmadd_vf_f16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmadd_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmadd_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m1_t test_vfmadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmadd_vv_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmadd_vf_f32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmadd_vv_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmadd_vf_f32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmadd_vv_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t 
test_vfmadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmadd_vf_f32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmadd_vv_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmadd_vf_f32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmadd_vv_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmadd_vf_f64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m1_tum(mask, vd, rs1, 
vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmadd_vv_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmadd_vf_f64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmadd_vv_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmadd_vf_f64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - 
return vfmadd_vv_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfmadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmadd_vf_f64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfmadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmadd_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmadd_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmadd_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloa // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmadd_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmadd_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmadd_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmadd_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmadd_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfmadd_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmadd_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmadd_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmadd_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmadd_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfmadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return 
vfmadd_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmadd_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmadd_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmadd_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmadd_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m2_t test_vfmadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmadd_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmadd_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmadd_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmadd_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmadd_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_tumu( @@ -751,7 +751,7 @@ 
vfloat32m8_t test_vfmadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmadd_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmadd_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmadd_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmadd_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmadd_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); + return 
__riscv_vfmadd_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmadd_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmadd_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfmadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmadd_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfmadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmadd_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmadd_vf_f16mf4_mu(vbool64_t mask, 
vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmadd_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmadd_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmadd_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmadd_vv_f16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmadd_vf_f16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t 
vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmadd_vv_f16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmadd_vf_f16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmadd_vv_f16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmadd_vf_f16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmadd_vv_f16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_mu( @@ 
-922,7 +922,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmadd_vf_f16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfmadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmadd_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmadd_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmadd_vv_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmadd_vf_f32m1_mu(mask, vd, rs1, vs2, vl); + return 
__riscv_vfmadd_vf_f32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmadd_vv_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmadd_vf_f32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmadd_vv_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmadd_vf_f32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t 
vs2, size_t vl) { - return vfmadd_vv_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmadd_vf_f32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmadd_vv_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmadd_vf_f64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmadd_vv_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m2_t test_vfmadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmadd_vf_f64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmadd_vv_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmadd_vf_f64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmadd_vv_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmadd_vv_f64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmadd_vf_f64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmadd_vf_f64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax.c index 9cfeaa33c4f0..0af8a69d5256 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmax.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmax_vv_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmax_vv_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m1_t test_vfmax_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmax_vv_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmax_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfmax_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmax_vv_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfmax_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmax_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmax_vv_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmax_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfmax_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmax_vv_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmax_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmax_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmax_vv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmax_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t 
test_vfmax_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmax_vv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfmax_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmax_vf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmax_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmax_vv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmax_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmax_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmax_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmax_vv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_tu( @@ 
-184,7 +184,7 @@ vfloat32m4_t test_vfmax_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmax_vf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfmax_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmax_vv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfmax_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmax_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmax_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmax_vv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfmax_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmax_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfmax_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmax_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmax_vv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfmax_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmax_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmax_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmax_vv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmax_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmax_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmax_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmax_vv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m8_tu(maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmax_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmax_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfmax_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmax_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmax_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t 
maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmax_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfmax_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmax_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmax_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmax_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_tum( @@ -355,7 
+355,7 @@ vfloat16m2_t test_vfmax_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmax_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmax_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfmax_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmax_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfmax_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmax_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, 
size_t vl) { - return vfmax_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmax_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmax_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmax_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmax_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfmax_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmax_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t 
test_vfmax_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmax_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmax_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmax_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfmax_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmax_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmax_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmax_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmax_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmax_vf_f32m8_tum(mask, 
maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmax_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmax_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmax_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmax_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfmax_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmax_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmax_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmax_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmax_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmax_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmax_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmax_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmax_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmax_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfmax_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmax_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfmax_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmax_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfmax_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmax_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmax_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfmax_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, 
vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfmax_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmax_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfmax_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfmax_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmax_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfmax_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfmax_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfmax_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmax_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfmax_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfmax_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmax_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmax_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmax_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfmax_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmax_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfmax_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmax_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfmax_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmax_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfmax_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmax_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfmax_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfmax_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmax_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfmax_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmax_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfmax_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmax_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfmax_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmax_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfmax_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m1_t test_vfmax_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmax_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfmax_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmax_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfmax_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmax_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfmax_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmax_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfmax_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmax_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfmax_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfmax_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmax_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfmax_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmax_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfmax_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmax_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmax_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmax_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmax_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16mf2_t test_vfmax_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmax_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfmax_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmax_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmax_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmax_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfmax_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmax_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfmax_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmax_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m2_mu(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfmax_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmax_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfmax_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmax_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfmax_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmax_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfmax_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmax_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfmax_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmax_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t 
maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfmax_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmax_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmax_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmax_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfmax_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmax_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_mu( @@ -967,7 +967,7 @@ 
vfloat32m1_t test_vfmax_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmax_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfmax_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmax_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfmax_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmax_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfmax_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmax_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmax_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return 
vfmax_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmax_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmax_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfmax_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmax_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmax_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmax_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmax_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmax_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmax_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t 
maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmax_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmax_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmax_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmax_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmax_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmax_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmax_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmax_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmax_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmax_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfmax_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge.c index b0286fb8ab1e..86cbff68f304 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) { - return vfmerge_vfm_f16mf4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f16mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) { - return vfmerge_vfm_f16mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f16mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) { - return vfmerge_vfm_f16m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f16m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) { - return 
vfmerge_vfm_f16m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f16m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) { - return vfmerge_vfm_f16m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f16m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) { - return vfmerge_vfm_f16m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f16m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) { - return vfmerge_vfm_f32mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f32mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) { - return vfmerge_vfm_f32m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f32m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t 
test_vfmerge_vfm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) { - return vfmerge_vfm_f32m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f32m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) { - return vfmerge_vfm_f32m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f32m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) { - return vfmerge_vfm_f32m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f32m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) { - return vfmerge_vfm_f64m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f64m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) { - return 
vfmerge_vfm_f64m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f64m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) { - return vfmerge_vfm_f64m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f64m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_tu( @@ -139,6 +139,6 @@ vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) { - return vfmerge_vfm_f64m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vfmerge_vfm_f64m8_tu(maskedoff, op1, op2, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin.c index 855a518770a1..c898f0a54e9b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmin.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmin_vv_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return 
vfmin_vf_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmin_vv_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmin_vv_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmin_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfmin_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, 
vfloat16m2_t op2, size_t vl) { - return vfmin_vv_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfmin_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmin_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmin_vv_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmin_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfmin_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmin_vv_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmin_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vf_f16m8_tu(vfloat16m8_t 
maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmin_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmin_vv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmin_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmin_vv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfmin_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmin_vf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmin_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m2_t test_vfmin_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmin_vv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmin_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmin_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmin_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmin_vv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfmin_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmin_vf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfmin_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmin_vv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfmin_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmin_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmin_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmin_vv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfmin_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmin_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmin_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmin_vv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfmin_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmin_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmin_vf_f64m2_tu(vfloat64m2_t maskedoff, 
vfloat64m2_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmin_vv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmin_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmin_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmin_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmin_vv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmin_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmin_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfmin_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmin_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_tum( @@ -292,7 
+292,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmin_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmin_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfmin_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 
op2, size_t vl) { - return vfmin_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmin_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmin_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmin_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfmin_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmin_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmin_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t 
test_vfmin_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmin_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfmin_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmin_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmin_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmin_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return 
vfmin_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmin_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmin_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfmin_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmin_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfmin_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmin_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmin_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmin_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfmin_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t 
maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmin_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmin_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmin_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmin_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmin_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmin_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmin_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmin_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmin_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfmin_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfmin_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmin_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmin_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmin_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmin_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmin_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmin_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmin_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmin_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m8_t test_vfmin_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmin_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfmin_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmin_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfmin_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmin_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmin_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfmin_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmin_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfmin_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfmin_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmin_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfmin_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfmin_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmin_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfmin_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfmin_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmin_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfmin_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfmin_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfmin_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmin_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmin_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmin_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfmin_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmin_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfmin_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmin_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfmin_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmin_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfmin_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmin_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfmin_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmin_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfmin_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmin_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfmin_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfmin_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmin_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfmin_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmin_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfmin_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmin_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfmin_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmin_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfmin_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmin_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfmin_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmin_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfmin_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmin_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfmin_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmin_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfmin_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmin_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m8_tumu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfmin_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmin_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmin_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmin_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmin_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmin_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfmin_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmin_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmin_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t 
test_vfmin_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmin_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfmin_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmin_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfmin_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmin_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfmin_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmin_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfmin_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmin_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfmin_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfmin_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmin_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfmin_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmin_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfmin_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmin_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfmin_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmin_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t 
op1, float op2, size_t vl) { - return vfmin_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmin_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfmin_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmin_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfmin_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmin_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfmin_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmin_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t 
test_vfmin_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmin_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfmin_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmin_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmin_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmin_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmin_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmin_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfmin_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmin_vv_f64m1_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmin_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmin_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmin_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmin_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmin_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmin_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmin_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmin_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmin_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmin_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmin_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmin_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmin_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmin_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmin_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac.c index 21aae52f7cbe..1597d60861ca 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsac.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsac_vv_f16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vflo // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsac_vf_f16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsac_vv_f16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsac_vf_f16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmsac_vv_f16m1_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmsac_vf_f16m1_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t 
test_vfmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmsac_vv_f16m2_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmsac_vf_f16m2_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmsac_vv_f16m4_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmsac_vf_f16m4_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmsac_vv_f16m8_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, 
vfloat16m8_t vs2, size_t vl) { - return vfmsac_vf_f16m8_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsac_vv_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsac_vf_f32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmsac_vv_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmsac_vf_f32m1_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return 
vfmsac_vv_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmsac_vf_f32m2_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmsac_vv_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmsac_vf_f32m4_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmsac_vv_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmsac_vf_f32m8_tu(vd, rs1, vs2, vl); + return 
__riscv_vfmsac_vf_f32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmsac_vv_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmsac_vf_f64m1_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmsac_vv_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmsac_vf_f64m2_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmsac_vv_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m4_tu(vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmsac_vf_f64m4_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmsac_vv_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmsac_vf_f64m8_tu(vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsac_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsac_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16mf4_tum(mask, vd, 
rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsac_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsac_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmsac_vv_f16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmsac_vf_f16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, 
vfloat16m2_t vs2, size_t vl) { - return vfmsac_vv_f16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmsac_vf_f16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfmsac_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmsac_vv_f16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmsac_vf_f16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmsac_vv_f16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8 // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmsac_vf_f16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmsac_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsac_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsac_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmsac_vv_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmsac_vf_f32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_tum( @@ -427,7 
+427,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmsac_vv_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmsac_vf_f32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmsac_vv_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmsac_vf_f32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmsac_vv_f32m8_tum(mask, vd, vs1, vs2, vl); + return 
__riscv_vfmsac_vv_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmsac_vf_f32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmsac_vv_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmsac_vf_f64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmsac_vv_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double 
rs1, vfloat64m2_t vs2, size_t vl) { - return vfmsac_vf_f64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmsac_vv_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmsac_vf_f64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmsac_vv_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmsac_vf_f64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsac_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsac_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsac_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsac_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmsac_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m1_tumu(mask, vd, vs1, vs2, 
vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmsac_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmsac_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmsac_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfmsac_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmsac_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t 
vl) { - return vfmsac_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmsac_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmsac_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfmsac_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsac_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmsac_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmsac_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmsac_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmsac_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmsac_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfmsac_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmsac_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmsac_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmsac_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return 
vfmsac_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmsac_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmsac_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmsac_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmsac_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m8_t test_vfmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmsac_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmsac_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsac_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsac_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsac_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsac_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_mu( @@ -850,7 
+850,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsac_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsac_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmsac_vv_f16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsac_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmsac_vf_f16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmsac_vv_f16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsac_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmsac_vf_f16m2_mu(mask, vd, rs1, vs2, vl); + return 
__riscv_vfmsac_vf_f16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfmsac_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmsac_vv_f16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsac_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmsac_vf_f16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmsac_vv_f16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsac_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmsac_vf_f16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfmsac_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, 
vfloat32mf2_t vs2, size_t vl) { - return vfmsac_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsac_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmsac_vv_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmsac_vf_f32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmsac_vv_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat32m2_t test_vfmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmsac_vf_f32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmsac_vv_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmsac_vf_f32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmsac_vv_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmsac_vf_f32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t 
test_vfmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmsac_vv_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmsac_vf_f64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmsac_vv_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmsac_vf_f64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmsac_vv_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m4_mu(mask, vd, vs1, 
vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmsac_vf_f64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmsac_vv_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsac_vv_f64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmsac_vf_f64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsac_vf_f64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub.c index c3fe807de687..9ea7476be256 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmsub.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsub_vv_f16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_tu( @@ -22,7 +22,7 
@@ vfloat16mf4_t test_vfmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsub_vf_f16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsub_vv_f16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsub_vf_f16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmsub_vv_f16m1_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmsub_vf_f16m1_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t 
test_vfmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmsub_vv_f16m2_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmsub_vf_f16m2_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmsub_vv_f16m4_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmsub_vf_f16m4_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmsub_vv_f16m8_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, 
vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmsub_vf_f16m8_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsub_vv_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsub_vf_f32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmsub_vv_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmsub_vf_f32m1_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m2_t test_vfmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmsub_vv_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmsub_vf_f32m2_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmsub_vv_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmsub_vf_f32m4_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmsub_vv_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_tu(vfloat32m8_t vd, 
float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmsub_vf_f32m8_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmsub_vv_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmsub_vf_f64m1_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmsub_vv_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmsub_vf_f64m2_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return 
vfmsub_vv_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmsub_vf_f64m4_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmsub_vv_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmsub_vf_f64m8_tu(vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsub_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - 
return vfmsub_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsub_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsub_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmsub_vv_f16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmsub_vf_f16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat16m2_t test_vfmsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmsub_vv_f16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmsub_vf_f16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmsub_vv_f16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmsub_vf_f16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmsub_vv_f16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_tum( @@ -382,7 +382,7 @@ 
vfloat16m8_t test_vfmsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmsub_vf_f16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsub_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsub_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmsub_vv_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmsub_vf_f32m1_tum(mask, vd, rs1, vs2, vl); + return 
__riscv_vfmsub_vf_f32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmsub_vv_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmsub_vf_f32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmsub_vv_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmsub_vf_f32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t 
vs1, vfloat32m8_t vs2, size_t vl) { - return vfmsub_vv_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmsub_vf_f32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmsub_vv_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmsub_vf_f64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmsub_vv_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmsub_vf_f64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmsub_vv_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmsub_vf_f64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmsub_vv_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfmsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmsub_vf_f64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_tumu( @@ 
-553,7 +553,7 @@ vfloat64m8_t test_vfmsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsub_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsub_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsub_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsub_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return 
vfmsub_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmsub_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfmsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmsub_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfmsub_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmsub_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m4_t test_vfmsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmsub_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmsub_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfmsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmsub_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfmsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsub_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsub_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_tumu( @@ 
-679,7 +679,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmsub_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmsub_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmsub_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmsub_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmsub_vv_f32m4_tumu(mask, vd, vs1, vs2, 
vl); + return __riscv_vfmsub_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmsub_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmsub_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmsub_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmsub_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_tumu(vbool64_t 
mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmsub_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmsub_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmsub_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfmsub_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmsub_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4_tumu(vbool16_t 
mask, vfloat64m4_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmsub_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfmsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmsub_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfmsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsub_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfmsub_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsub_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16mf2_mu(mask, vd, vs1, vs2, 
vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfmsub_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfmsub_vv_f16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfmsub_vf_f16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfmsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfmsub_vv_f16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return 
vfmsub_vf_f16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfmsub_vv_f16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfmsub_vf_f16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfmsub_vv_f16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfmsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfmsub_vf_f16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfmsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfmsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsub_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfmsub_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfmsub_vv_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfmsub_vf_f32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfmsub_vv_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t 
test_vfmsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfmsub_vf_f32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfmsub_vv_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfmsub_vf_f32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfmsub_vv_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfmsub_vf_f32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f32m8_mu(mask, vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfmsub_vv_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfmsub_vf_f64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfmsub_vv_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfmsub_vf_f64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return 
vfmsub_vv_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfmsub_vf_f64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfmsub_vv_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfmsub_vv_f64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfmsub_vf_f64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfmsub_vf_f64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul.c index f51ff8990178..b9c6bddc1b08 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmul.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmul_vv_f16mf4_tu(maskedoff, op1, op2, vl); + return 
__riscv_vfmul_vv_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmul_vv_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmul_vv_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfmul_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return 
vfmul_vf_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfmul_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmul_vv_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfmul_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfmul_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmul_vv_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfmul_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfmul_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t 
op2, size_t vl) { - return vfmul_vv_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfmul_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfmul_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmul_vv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmul_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmul_vv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfmul_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfmul_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmul_vf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfmul_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmul_vv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfmul_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmul_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfmul_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmul_vv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfmul_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmul_vf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfmul_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m8_t test_vfmul_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmul_vv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfmul_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmul_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfmul_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmul_vv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfmul_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmul_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfmul_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmul_vv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfmul_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, 
vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmul_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfmul_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmul_vv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfmul_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmul_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfmul_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmul_vv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfmul_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmul_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfmul_vf_f64m8_tu(vfloat64m8_t 
maskedoff, vfloat64m8_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmul_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmul_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return 
vfmul_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfmul_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfmul_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmul_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfmul_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfmul_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmul_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfmul_vv_f16m4_tum(vbool4_t mask, 
vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfmul_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmul_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfmul_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfmul_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmul_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmul_vf_f32mf2_tum(mask, maskedoff, op1, 
op2, vl); + return __riscv_vfmul_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmul_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfmul_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmul_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfmul_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmul_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfmul_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmul_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfmul_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmul_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfmul_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmul_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfmul_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmul_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfmul_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmul_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfmul_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmul_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m1_tum(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfmul_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmul_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfmul_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmul_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfmul_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmul_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfmul_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmul_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfmul_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vf_f64m4_tum(vbool16_t mask, 
vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmul_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfmul_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmul_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfmul_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmul_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfmul_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmul_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfmul_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmul_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmul_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfmul_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfmul_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vv_f16m2_tumu(vbool8_t mask, 
vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmul_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfmul_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfmul_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmul_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfmul_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfmul_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmul_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfmul_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfmul_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfmul_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfmul_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmul_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmul_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfmul_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vf_f32m1_tumu(vbool32_t mask, 
vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmul_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfmul_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmul_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfmul_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmul_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfmul_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmul_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfmul_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmul_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfmul_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfmul_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmul_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfmul_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmul_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfmul_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmul_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmul_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t 
maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmul_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfmul_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmul_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfmul_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmul_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfmul_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmul_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfmul_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmul_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfmul_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfmul_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmul_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfmul_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmul_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfmul_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmul_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfmul_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmul_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfmul_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmul_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t 
maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfmul_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmul_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfmul_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmul_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfmul_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmul_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfmul_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmul_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t 
test_vfmul_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmul_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfmul_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmul_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfmul_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmul_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfmul_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmul_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfmul_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return 
vfmul_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfmul_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfmul_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfmul_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfmul_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfmul_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfmul_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfmul_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, 
vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfmul_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfmul_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfmul_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfmul_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfmul_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfmul_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfmul_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfmul_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfmul_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f32m8_mu(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfmul_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfmul_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfmul_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfmul_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfmul_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfmul_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfmul_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfmul_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfmul_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vv_f64m4_mu(vbool16_t mask, 
vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfmul_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfmul_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfmul_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfmul_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfmul_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfmul_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfmul_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfmul_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmv.c index ac7cf14592f3..97839bea00aa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmv.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t 
test_vfmv_v_f_f16mf4_tu(vfloat16mf4_t maskedoff, _Float16 src, size_t vl) { - return vfmv_v_f_f16mf4_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfmv_v_f_f16mf4_tu(vfloat16mf4_t maskedoff, _Float16 src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfmv_v_f_f16mf2_tu(vfloat16mf2_t maskedoff, _Float16 src, size_t vl) { - return vfmv_v_f_f16mf2_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfmv_v_f_f16mf2_tu(vfloat16mf2_t maskedoff, _Float16 src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmv_v_f_f16m1_tu(vfloat16m1_t maskedoff, _Float16 src, size_t vl) { - return vfmv_v_f_f16m1_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfmv_v_f_f16m1_tu(vfloat16m1_t maskedoff, _Float16 src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmv_v_f_f16m2_tu(vfloat16m2_t maskedoff, _Float16 src, size_t vl) { - return vfmv_v_f_f16m2_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfmv_v_f_f16m2_tu(vfloat16m2_t maskedoff, _Float16 src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmv_v_f_f16m4_tu(vfloat16m4_t maskedoff, _Float16 src, size_t vl) { - return vfmv_v_f_f16m4_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfmv_v_f_f16m4_tu(vfloat16m4_t maskedoff, _Float16 src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmv_v_f_f16m8_tu(vfloat16m8_t maskedoff, _Float16 src, size_t vl) { - return vfmv_v_f_f16m8_tu(maskedoff, src, 
vl); + return __riscv_vfmv_v_f_f16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfmv_v_f_f16m8_tu(vfloat16m8_t maskedoff, _Float16 src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmv_v_f_f32mf2_tu(vfloat32mf2_t maskedoff, float src, size_t vl) { - return vfmv_v_f_f32mf2_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfmv_v_f_f32mf2_tu(vfloat32mf2_t maskedoff, float src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmv_v_f_f32m1_tu(vfloat32m1_t maskedoff, float src, size_t vl) { - return vfmv_v_f_f32m1_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfmv_v_f_f32m1_tu(vfloat32m1_t maskedoff, float src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmv_v_f_f32m2_tu(vfloat32m2_t maskedoff, float src, size_t vl) { - return vfmv_v_f_f32m2_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfmv_v_f_f32m2_tu(vfloat32m2_t maskedoff, float src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmv_v_f_f32m4_tu(vfloat32m4_t maskedoff, float src, size_t vl) { - return vfmv_v_f_f32m4_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfmv_v_f_f32m4_tu(vfloat32m4_t maskedoff, float src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmv_v_f_f32m8_tu(vfloat32m8_t maskedoff, float src, size_t vl) { - return vfmv_v_f_f32m8_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m1_tu( @@ -112,7 +112,7 @@ 
vfloat32m8_t test_vfmv_v_f_f32m8_tu(vfloat32m8_t maskedoff, float src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmv_v_f_f64m1_tu(vfloat64m1_t maskedoff, double src, size_t vl) { - return vfmv_v_f_f64m1_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfmv_v_f_f64m1_tu(vfloat64m1_t maskedoff, double src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmv_v_f_f64m2_tu(vfloat64m2_t maskedoff, double src, size_t vl) { - return vfmv_v_f_f64m2_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfmv_v_f_f64m2_tu(vfloat64m2_t maskedoff, double src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmv_v_f_f64m4_tu(vfloat64m4_t maskedoff, double src, size_t vl) { - return vfmv_v_f_f64m4_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_v_f_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfmv_v_f_f64m4_tu(vfloat64m4_t maskedoff, double src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmv_v_f_f64m8_tu(vfloat64m8_t maskedoff, double src, size_t vl) { - return vfmv_v_f_f64m8_tu(maskedoff, src, vl); + return __riscv_vfmv_v_f_f64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4_tu( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfmv_v_f_f64m8_tu(vfloat64m8_t maskedoff, double src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfmv_s_f_f16mf4_tu(vfloat16mf4_t maskedoff, _Float16 src, size_t vl) { - return vfmv_s_f_f16mf4_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2_tu( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfmv_s_f_f16mf4_tu(vfloat16mf4_t maskedoff, _Float16 src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16mf2_t test_vfmv_s_f_f16mf2_tu(vfloat16mf2_t maskedoff, _Float16 src, size_t vl) { - return vfmv_s_f_f16mf2_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1_tu( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfmv_s_f_f16mf2_tu(vfloat16mf2_t maskedoff, _Float16 src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfmv_s_f_f16m1_tu(vfloat16m1_t maskedoff, _Float16 src, size_t vl) { - return vfmv_s_f_f16m1_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2_tu( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfmv_s_f_f16m1_tu(vfloat16m1_t maskedoff, _Float16 src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfmv_s_f_f16m2_tu(vfloat16m2_t maskedoff, _Float16 src, size_t vl) { - return vfmv_s_f_f16m2_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4_tu( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfmv_s_f_f16m2_tu(vfloat16m2_t maskedoff, _Float16 src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfmv_s_f_f16m4_tu(vfloat16m4_t maskedoff, _Float16 src, size_t vl) { - return vfmv_s_f_f16m4_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8_tu( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfmv_s_f_f16m4_tu(vfloat16m4_t maskedoff, _Float16 src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfmv_s_f_f16m8_tu(vfloat16m8_t maskedoff, _Float16 src, size_t vl) { - return vfmv_s_f_f16m8_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2_tu( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfmv_s_f_f16m8_tu(vfloat16m8_t maskedoff, _Float16 src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmv_s_f_f32mf2_tu(vfloat32mf2_t maskedoff, float src, size_t vl) { - return 
vfmv_s_f_f32mf2_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1_tu( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfmv_s_f_f32mf2_tu(vfloat32mf2_t maskedoff, float src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmv_s_f_f32m1_tu(vfloat32m1_t maskedoff, float src, size_t vl) { - return vfmv_s_f_f32m1_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2_tu( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfmv_s_f_f32m1_tu(vfloat32m1_t maskedoff, float src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmv_s_f_f32m2_tu(vfloat32m2_t maskedoff, float src, size_t vl) { - return vfmv_s_f_f32m2_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4_tu( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfmv_s_f_f32m2_tu(vfloat32m2_t maskedoff, float src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmv_s_f_f32m4_tu(vfloat32m4_t maskedoff, float src, size_t vl) { - return vfmv_s_f_f32m4_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8_tu( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfmv_s_f_f32m4_tu(vfloat32m4_t maskedoff, float src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmv_s_f_f32m8_tu(vfloat32m8_t maskedoff, float src, size_t vl) { - return vfmv_s_f_f32m8_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1_tu( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfmv_s_f_f32m8_tu(vfloat32m8_t maskedoff, float src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmv_s_f_f64m1_tu(vfloat64m1_t maskedoff, double src, size_t vl) { - return vfmv_s_f_f64m1_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfmv_s_f_f64m2_tu( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfmv_s_f_f64m1_tu(vfloat64m1_t maskedoff, double src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmv_s_f_f64m2_tu(vfloat64m2_t maskedoff, double src, size_t vl) { - return vfmv_s_f_f64m2_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4_tu( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfmv_s_f_f64m2_tu(vfloat64m2_t maskedoff, double src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmv_s_f_f64m4_tu(vfloat64m4_t maskedoff, double src, size_t vl) { - return vfmv_s_f_f64m4_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8_tu( @@ -274,6 +274,6 @@ vfloat64m4_t test_vfmv_s_f_f64m4_tu(vfloat64m4_t maskedoff, double src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmv_s_f_f64m8_tu(vfloat64m8_t maskedoff, double src, size_t vl) { - return vfmv_s_f_f64m8_tu(maskedoff, src, vl); + return __riscv_vfmv_s_f_f64m8_tu(maskedoff, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt.c index 7be371615e4a..598b907b1bc6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vfncvt_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_x_f_w_i8mf8_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8mf8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tu( @@ -22,7 +22,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf8_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_tu( @@ -31,7 +31,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_x_f_w_i8mf4_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tu( @@ -40,7 +40,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf4_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_tu( @@ -49,7 +49,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vfncvt_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_x_f_w_i8mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tu( @@ -58,7 +58,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_tu( @@ -67,7 +67,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vfncvt_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_x_f_w_i8m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tu( @@ -76,7 +76,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_tu( @@ -85,7 +85,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vfncvt_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_x_f_w_i8m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tu( @@ -94,7 +94,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_tu( @@ -103,7 +103,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_x_f_w_i8m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tu( @@ -112,7 +112,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t 
maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_tu( @@ -121,7 +121,7 @@ vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf8_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tu( @@ -130,7 +130,7 @@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf8_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_tu( @@ -139,7 +139,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf4_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tu( @@ -148,7 +148,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf4_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_tu( @@ -157,7 +157,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vfncvt_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tu( @@ -166,7 +166,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_tu( @@ -175,7 +175,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_xu_f_w_u8m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tu( @@ -184,7 +184,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_tu( @@ -193,7 +193,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vfncvt_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_xu_f_w_u8m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tu( @@ -202,7 +202,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_tu( @@ -211,7 +211,7 @@ vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vfncvt_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_xu_f_w_u8m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tu( @@ -220,7 +220,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tu( @@ -229,7 +229,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_x_f_w_i16mf4_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tu( @@ -238,7 +238,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf4_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_tu( @@ -247,7 +247,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_x_f_w_i16mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tu( @@ -256,7 +256,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_tu( @@ -265,7 +265,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_x_f_w_i16m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tu( @@ -274,7 +274,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_tu( @@ -283,7 +283,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_x_f_w_i16m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tu( @@ -292,7 +292,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, 
si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_tu( @@ -301,7 +301,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_x_f_w_i16m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tu( @@ -310,7 +310,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tu( @@ -319,7 +319,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf4_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tu( @@ -328,7 +328,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf4_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_tu( @@ -337,7 +337,7 @@ vuint16mf4_t 
test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tu( @@ -346,7 +346,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_tu( @@ -355,7 +355,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_xu_f_w_u16m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tu( @@ -364,7 +364,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_tu( @@ -373,7 +373,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_xu_f_w_u16m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfncvt_rtz_xu_f_w_u16m2_tu( @@ -382,7 +382,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_tu( @@ -391,7 +391,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_xu_f_w_u16m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tu( @@ -400,7 +400,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tu( @@ -409,7 +409,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tu(vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return vfncvt_f_x_w_f16mf4_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_tu( @@ -418,7 +418,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tu(vfloat16mf4_t maskedoff, vint32mf2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tu(vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vfncvt_f_x_w_f16mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16mf2_tu(maskedoff, src, 
vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_tu( @@ -427,7 +427,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tu(vfloat16mf2_t maskedoff, vint32m1_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_x_w_f16m1_tu(vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vfncvt_f_x_w_f16m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_tu( @@ -436,7 +436,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1_tu(vfloat16m1_t maskedoff, vint32m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_x_w_f16m2_tu(vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return vfncvt_f_x_w_f16m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_tu( @@ -445,7 +445,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2_tu(vfloat16m2_t maskedoff, vint32m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_x_w_f16m4_tu(vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vfncvt_f_x_w_f16m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tu( @@ -454,7 +454,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4_tu(vfloat16m4_t maskedoff, vint32m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tu(vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf4_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_tu( @@ -463,7 +463,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tu(vfloat16mf4_t maskedoff, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tu(vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16mf2_tu(maskedoff, src, vl); } // 
CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_tu( @@ -472,7 +472,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tu(vfloat16mf2_t maskedoff, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tu(vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vfncvt_f_xu_w_f16m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_tu( @@ -481,7 +481,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tu(vfloat16m1_t maskedoff, vuint32m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tu(vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return vfncvt_f_xu_w_f16m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_tu( @@ -490,7 +490,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tu(vfloat16m2_t maskedoff, vuint32m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tu(vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return vfncvt_f_xu_w_f16m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tu( @@ -499,7 +499,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tu(vfloat16m4_t maskedoff, vuint32m8_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_f_f_w_f16mf4_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tu( @@ -508,7 +508,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf4_tu(maskedoff, src, vl); + return 
__riscv_vfncvt_rod_f_f_w_f16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_tu( @@ -517,7 +517,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_f_f_w_f16mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tu( @@ -526,7 +526,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_tu( @@ -535,7 +535,7 @@ vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_f_f_w_f16m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tu( @@ -544,7 +544,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_tu( @@ -553,7 +553,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return 
vfncvt_f_f_w_f16m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tu( @@ -562,7 +562,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_tu( @@ -571,7 +571,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_f_f_w_f16m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tu( @@ -580,7 +580,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_tu( @@ -589,7 +589,7 @@ vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_x_f_w_i32mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tu( @@ -598,7 +598,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src, 
size_t vl) { - return vfncvt_rtz_x_f_w_i32mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_tu( @@ -607,7 +607,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_x_f_w_i32m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tu( @@ -616,7 +616,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_tu( @@ -625,7 +625,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_x_f_w_i32m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tu( @@ -634,7 +634,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_tu( @@ -643,7 +643,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t 
src, size_t vl) { - return vfncvt_x_f_w_i32m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tu( @@ -652,7 +652,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_tu( @@ -661,7 +661,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_xu_f_w_u32mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tu( @@ -670,7 +670,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_tu( @@ -679,7 +679,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_xu_f_w_u32m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tu( @@ -688,7 +688,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_tu( @@ -697,7 +697,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_xu_f_w_u32m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tu( @@ -706,7 +706,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_tu( @@ -715,7 +715,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_xu_f_w_u32m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tu( @@ -724,7 +724,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_tu( @@ -733,7 +733,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tu(vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vfncvt_f_x_w_f32mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_tu( @@ -742,7 +742,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tu(vfloat32mf2_t maskedoff, vint64m1_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_x_w_f32m1_tu(vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vfncvt_f_x_w_f32m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_tu( @@ -751,7 +751,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1_tu(vfloat32m1_t maskedoff, vint64m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_x_w_f32m2_tu(vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vfncvt_f_x_w_f32m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_tu( @@ -760,7 +760,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2_tu(vfloat32m2_t maskedoff, vint64m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_x_w_f32m4_tu(vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vfncvt_f_x_w_f32m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_tu( @@ -769,7 +769,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4_tu(vfloat32m4_t maskedoff, vint64m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tu(vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vfncvt_f_xu_w_f32mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_tu( @@ -778,7 +778,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tu(vfloat32mf2_t maskedoff, vuint64m1_t // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tu(vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vfncvt_f_xu_w_f32m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_tu( @@ -787,7 +787,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tu(vfloat32m1_t maskedoff, vuint64m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tu(vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vfncvt_f_xu_w_f32m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_tu( @@ -796,7 +796,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tu(vfloat32m2_t maskedoff, vuint64m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tu(vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vfncvt_f_xu_w_f32m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_tu( @@ -805,7 +805,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tu(vfloat32m4_t maskedoff, vuint64m8_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_f_f_w_f32mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tu( @@ -814,7 +814,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32mf2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_tu( @@ -823,7 +823,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_f_f_w_f32m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tu( @@ -832,7 +832,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m1_tu(maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_tu( @@ -841,7 +841,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_f_f_w_f32m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tu( @@ -850,7 +850,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m2_tu(maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_tu( @@ -859,7 +859,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_f_f_w_f32m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tu( @@ -868,7 +868,7 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, 
vfloat64m8_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m4_tu(maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_tum( @@ -877,7 +877,7 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vfncvt_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_x_f_w_i8mf8_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8mf8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tum( @@ -886,7 +886,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf8_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_tum( @@ -895,7 +895,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_x_f_w_i8mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tum( @@ -904,7 +904,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf4_tum(mask, maskedoff, src, 
vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_tum( @@ -913,7 +913,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vfncvt_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_x_f_w_i8mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tum( @@ -922,7 +922,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_tum( @@ -931,7 +931,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vfncvt_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_x_f_w_i8m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tum( @@ -940,7 +940,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_tum( @@ -949,7 +949,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vfncvt_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, 
size_t vl) { - return vfncvt_x_f_w_i8m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tum( @@ -958,7 +958,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_tum( @@ -967,7 +967,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_x_f_w_i8m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tum( @@ -976,7 +976,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_tum( @@ -985,7 +985,7 @@ vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf8_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tum( @@ -994,7 +994,7 @@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t 
maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf8_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_tum( @@ -1003,7 +1003,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tum( @@ -1012,7 +1012,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_tum( @@ -1021,7 +1021,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tum( @@ -1030,7 +1030,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf2_tum(mask, maskedoff, src, vl); + return 
__riscv_vfncvt_rtz_xu_f_w_u8mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_tum( @@ -1039,7 +1039,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_xu_f_w_u8m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tum( @@ -1048,7 +1048,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_tum( @@ -1057,7 +1057,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vfncvt_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_xu_f_w_u8m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tum( @@ -1066,7 +1066,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_tum( @@ -1075,7 +1075,7 @@ vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vfncvt_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_xu_f_w_u8m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tum( @@ -1084,7 +1084,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tum( @@ -1093,7 +1093,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_x_f_w_i16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tum( @@ -1102,7 +1102,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_tum( @@ -1111,7 +1111,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_x_f_w_i16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfncvt_rtz_x_f_w_i16mf2_tum( @@ -1120,7 +1120,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_tum( @@ -1129,7 +1129,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_x_f_w_i16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tum( @@ -1138,7 +1138,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_tum( @@ -1147,7 +1147,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_x_f_w_i16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tum( @@ -1156,7 +1156,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, 
vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_tum( @@ -1165,7 +1165,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_x_f_w_i16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tum( @@ -1174,7 +1174,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tum( @@ -1183,7 +1183,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tum( @@ -1192,7 +1192,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_tum( @@ -1201,7 +1201,7 @@ 
vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tum( @@ -1210,7 +1210,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_tum( @@ -1219,7 +1219,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_xu_f_w_u16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tum( @@ -1228,7 +1228,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_tum( @@ -1237,7 +1237,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - 
return vfncvt_xu_f_w_u16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tum( @@ -1246,7 +1246,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_tum( @@ -1255,7 +1255,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_xu_f_w_u16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tum( @@ -1264,7 +1264,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tum( @@ -1273,7 +1273,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return vfncvt_f_x_w_f16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_tum( @@ -1282,7 +1282,7 @@ vfloat16mf4_t 
test_vfncvt_f_x_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vfncvt_f_x_w_f16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_tum( @@ -1291,7 +1291,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_x_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vfncvt_f_x_w_f16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_tum( @@ -1300,7 +1300,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_x_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return vfncvt_f_x_w_f16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_tum( @@ -1309,7 +1309,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_x_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vfncvt_f_x_w_f16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tum( @@ -1318,7 +1318,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf4_tum(mask, maskedoff, src, vl); + 
return __riscv_vfncvt_f_xu_w_f16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_tum( @@ -1327,7 +1327,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_tum( @@ -1336,7 +1336,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vfncvt_f_xu_w_f16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_tum( @@ -1345,7 +1345,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return vfncvt_f_xu_w_f16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_tum( @@ -1354,7 +1354,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return vfncvt_f_xu_w_f16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tum( @@ -1363,7 +1363,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_f_f_w_f16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tum( @@ -1372,7 +1372,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_tum( @@ -1381,7 +1381,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_f_f_w_f16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tum( @@ -1390,7 +1390,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_tum( @@ -1399,7 +1399,7 @@ vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_f_f_w_f16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16m1_tum(mask, 
maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tum( @@ -1408,7 +1408,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_tum( @@ -1417,7 +1417,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_f_f_w_f16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tum( @@ -1426,7 +1426,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_tum( @@ -1435,7 +1435,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_f_f_w_f16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tum( @@ -1444,7 +1444,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t 
test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_tum( @@ -1453,7 +1453,7 @@ vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_x_f_w_i32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tum( @@ -1462,7 +1462,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_tum( @@ -1471,7 +1471,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_x_f_w_i32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tum( @@ -1480,7 +1480,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m1_tum(mask, maskedoff, src, vl); } // 
CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_tum( @@ -1489,7 +1489,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_x_f_w_i32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tum( @@ -1498,7 +1498,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_tum( @@ -1507,7 +1507,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_x_f_w_i32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tum( @@ -1516,7 +1516,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_tum( @@ -1525,7 +1525,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t 
maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_xu_f_w_u32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tum( @@ -1534,7 +1534,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_tum( @@ -1543,7 +1543,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_xu_f_w_u32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tum( @@ -1552,7 +1552,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_tum( @@ -1561,7 +1561,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_xu_f_w_u32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tum( @@ 
-1570,7 +1570,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_tum( @@ -1579,7 +1579,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_xu_f_w_u32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tum( @@ -1588,7 +1588,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_tum( @@ -1597,7 +1597,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vfncvt_f_x_w_f32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_tum( @@ -1606,7 +1606,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_x_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { 
- return vfncvt_f_x_w_f32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_tum( @@ -1615,7 +1615,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_x_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vfncvt_f_x_w_f32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_tum( @@ -1624,7 +1624,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_x_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vfncvt_f_x_w_f32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_tum( @@ -1633,7 +1633,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vfncvt_f_xu_w_f32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_tum( @@ -1642,7 +1642,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vfncvt_f_xu_w_f32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_tum( @@ -1651,7 +1651,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tum(vbool32_t mask, vfloat32m1_t 
maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vfncvt_f_xu_w_f32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_tum( @@ -1660,7 +1660,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vfncvt_f_xu_w_f32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_tum( @@ -1669,7 +1669,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_f_f_w_f32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tum( @@ -1678,7 +1678,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_tum( @@ -1687,7 +1687,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_f_f_w_f32m1_tum(mask, maskedoff, src, vl); + return 
__riscv_vfncvt_f_f_w_f32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tum( @@ -1696,7 +1696,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_tum( @@ -1705,7 +1705,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_f_f_w_f32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tum( @@ -1714,7 +1714,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_tum( @@ -1723,7 +1723,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_f_f_w_f32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tum( @@ -1732,7 +1732,7 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_tumu( @@ -1741,7 +1741,7 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vfncvt_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_x_f_w_i8mf8_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8mf8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tumu( @@ -1750,7 +1750,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf8_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_tumu( @@ -1759,7 +1759,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_x_f_w_i8mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tumu( @@ -1768,7 +1768,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf4_tumu(mask, 
maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_tumu( @@ -1777,7 +1777,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vfncvt_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_x_f_w_i8mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tumu( @@ -1786,7 +1786,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_tumu( @@ -1795,7 +1795,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vfncvt_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_x_f_w_i8m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tumu( @@ -1804,7 +1804,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_tumu( @@ -1813,7 +1813,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vfncvt_x_f_w_i8m2_tumu(vbool4_t 
mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_x_f_w_i8m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tumu( @@ -1822,7 +1822,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_tumu( @@ -1831,7 +1831,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_x_f_w_i8m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tumu( @@ -1840,7 +1840,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_tumu( @@ -1849,7 +1849,7 @@ vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf8_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tumu( @@ -1858,7 +1858,7 
@@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf8_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_tumu( @@ -1867,7 +1867,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tumu( @@ -1876,7 +1876,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_tumu( @@ -1885,7 +1885,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tumu( @@ -1894,7 +1894,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t 
src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_tumu( @@ -1903,7 +1903,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_xu_f_w_u8m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tumu( @@ -1912,7 +1912,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_tumu( @@ -1921,7 +1921,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vfncvt_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_xu_f_w_u8m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tumu( @@ -1930,7 +1930,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_tumu( @@ -1939,7 +1939,7 @@ vuint8m2_t 
test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vfncvt_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_xu_f_w_u8m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tumu( @@ -1948,7 +1948,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tumu( @@ -1957,7 +1957,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_x_f_w_i16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tumu( @@ -1966,7 +1966,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_tumu( @@ -1975,7 +1975,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return 
vfncvt_x_f_w_i16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tumu( @@ -1984,7 +1984,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_tumu( @@ -1993,7 +1993,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_x_f_w_i16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tumu( @@ -2002,7 +2002,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_tumu( @@ -2011,7 +2011,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_x_f_w_i16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tumu( @@ -2020,7 +2020,7 @@ vint16m2_t 
test_vfncvt_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_tumu( @@ -2029,7 +2029,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_x_f_w_i16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tumu( @@ -2038,7 +2038,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tumu( @@ -2047,7 +2047,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tumu( @@ -2056,7 +2056,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return 
vfncvt_rtz_xu_f_w_u16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_tumu( @@ -2065,7 +2065,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tumu( @@ -2074,7 +2074,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_tumu( @@ -2083,7 +2083,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_xu_f_w_u16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tumu( @@ -2092,7 +2092,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_tumu( @@ -2101,7 +2101,7 @@ 
vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_xu_f_w_u16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tumu( @@ -2110,7 +2110,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_tumu( @@ -2119,7 +2119,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_xu_f_w_u16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tumu( @@ -2128,7 +2128,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tumu( @@ -2137,7 +2137,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) 
{ - return vfncvt_f_x_w_f16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_tumu( @@ -2146,7 +2146,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vfncvt_f_x_w_f16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_tumu( @@ -2155,7 +2155,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_x_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vfncvt_f_x_w_f16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_tumu( @@ -2164,7 +2164,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_x_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return vfncvt_f_x_w_f16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_tumu( @@ -2173,7 +2173,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_x_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vfncvt_f_x_w_f16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_tumu( @@ -2182,7 +2182,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4_tumu(vbool4_t mask, 
vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_tumu( @@ -2191,7 +2191,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_tumu( @@ -2200,7 +2200,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vfncvt_f_xu_w_f16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_tumu( @@ -2209,7 +2209,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return vfncvt_f_xu_w_f16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_tumu( @@ -2218,7 +2218,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return vfncvt_f_xu_w_f16m4_tumu(mask, maskedoff, src, vl); + 
return __riscv_vfncvt_f_xu_w_f16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_tumu( @@ -2227,7 +2227,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_f_f_w_f16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tumu( @@ -2236,7 +2236,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_tumu( @@ -2245,7 +2245,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_f_f_w_f16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tumu( @@ -2254,7 +2254,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_tumu( @@ -2263,7 +2263,7 @@ vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t 
mask, vfloat16mf2_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_f_f_w_f16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tumu( @@ -2272,7 +2272,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_tumu( @@ -2281,7 +2281,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_f_f_w_f16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tumu( @@ -2290,7 +2290,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_tumu( @@ -2299,7 +2299,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_f_f_w_f16m4_tumu(mask, maskedoff, src, vl); + 
return __riscv_vfncvt_f_f_w_f16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tumu( @@ -2308,7 +2308,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_tumu( @@ -2317,7 +2317,7 @@ vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_x_f_w_i32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tumu( @@ -2326,7 +2326,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_tumu( @@ -2335,7 +2335,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_x_f_w_i32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tumu( @@ -2344,7 +2344,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vf // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_tumu( @@ -2353,7 +2353,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_x_f_w_i32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tumu( @@ -2362,7 +2362,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_tumu( @@ -2371,7 +2371,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_x_f_w_i32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tumu( @@ -2380,7 +2380,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m4_tumu(mask, maskedoff, src, vl); + return 
__riscv_vfncvt_rtz_x_f_w_i32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_tumu( @@ -2389,7 +2389,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_xu_f_w_u32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tumu( @@ -2398,7 +2398,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_tumu( @@ -2407,7 +2407,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_xu_f_w_u32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tumu( @@ -2416,7 +2416,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_tumu( @@ -2425,7 +2425,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t mask, 
vuint32m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_xu_f_w_u32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tumu( @@ -2434,7 +2434,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_tumu( @@ -2443,7 +2443,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_xu_f_w_u32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tumu( @@ -2452,7 +2452,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_tumu( @@ -2461,7 +2461,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vfncvt_f_x_w_f32mf2_tumu(mask, maskedoff, src, 
vl); + return __riscv_vfncvt_f_x_w_f32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_tumu( @@ -2470,7 +2470,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_x_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vfncvt_f_x_w_f32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_tumu( @@ -2479,7 +2479,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_x_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vfncvt_f_x_w_f32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_tumu( @@ -2488,7 +2488,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_x_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vfncvt_f_x_w_f32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_tumu( @@ -2497,7 +2497,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vfncvt_f_xu_w_f32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_tumu( @@ -2506,7 +2506,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maske // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vfncvt_f_xu_w_f32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_tumu( @@ -2515,7 +2515,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vfncvt_f_xu_w_f32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_tumu( @@ -2524,7 +2524,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vfncvt_f_xu_w_f32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_tumu( @@ -2533,7 +2533,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_f_f_w_f32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tumu( @@ -2542,7 +2542,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32mf2_tumu(mask, maskedoff, src, vl); + return 
__riscv_vfncvt_rod_f_f_w_f32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_tumu( @@ -2551,7 +2551,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_f_f_w_f32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tumu( @@ -2560,7 +2560,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_tumu( @@ -2569,7 +2569,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_f_f_w_f32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tumu( @@ -2578,7 +2578,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_tumu( @@ -2587,7 +2587,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maske // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_f_f_w_f32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tumu( @@ -2596,7 +2596,7 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_mu( @@ -2605,7 +2605,7 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vfncvt_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_x_f_w_i8mf8_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8mf8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_mu( @@ -2614,7 +2614,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf8_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_mu( @@ -2623,7 +2623,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_x_f_w_i8mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8mf4_mu(mask, maskedoff, 
src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_mu( @@ -2632,7 +2632,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_mu( @@ -2641,7 +2641,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vfncvt_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_x_f_w_i8mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_mu( @@ -2650,7 +2650,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_mu( @@ -2659,7 +2659,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vfncvt_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_x_f_w_i8m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_mu( @@ -2668,7 +2668,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, 
vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_mu( @@ -2677,7 +2677,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vfncvt_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_x_f_w_i8m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_mu( @@ -2686,7 +2686,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_mu( @@ -2695,7 +2695,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_x_f_w_i8m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i8m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_mu( @@ -2704,7 +2704,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i8m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_mu( @@ -2713,7 +2713,7 @@ vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t 
maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf8_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_mu( @@ -2722,7 +2722,7 @@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf8_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_mu( @@ -2731,7 +2731,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_mu( @@ -2740,7 +2740,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_mu( @@ -2749,7 +2749,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf2_mu(mask, maskedoff, src, vl); + return 
__riscv_vfncvt_xu_f_w_u8mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_mu( @@ -2758,7 +2758,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_mu( @@ -2767,7 +2767,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_xu_f_w_u8m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_mu( @@ -2776,7 +2776,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_mu( @@ -2785,7 +2785,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vfncvt_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_xu_f_w_u8m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_mu( @@ -2794,7 +2794,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_mu( @@ -2803,7 +2803,7 @@ vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vfncvt_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_xu_f_w_u8m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u8m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_mu( @@ -2812,7 +2812,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u8m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_mu( @@ -2821,7 +2821,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_x_f_w_i16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_mu( @@ -2830,7 +2830,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfncvt_x_f_w_i16mf2_mu( @@ -2839,7 +2839,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_x_f_w_i16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_mu( @@ -2848,7 +2848,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_mu( @@ -2857,7 +2857,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_x_f_w_i16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_mu( @@ -2866,7 +2866,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_mu( @@ -2875,7 +2875,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t 
vl) { - return vfncvt_x_f_w_i16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_mu( @@ -2884,7 +2884,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_mu( @@ -2893,7 +2893,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_x_f_w_i16m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_mu( @@ -2902,7 +2902,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i16m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_mu( @@ -2911,7 +2911,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_mu( @@ -2920,7 +2920,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_mu(vbool64_t mask, 
vuint16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_mu( @@ -2929,7 +2929,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_xu_f_w_u16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_mu( @@ -2938,7 +2938,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_mu( @@ -2947,7 +2947,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_xu_f_w_u16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_mu( @@ -2956,7 +2956,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m1_mu(mask, maskedoff, src, vl); + 
return __riscv_vfncvt_rtz_xu_f_w_u16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_mu( @@ -2965,7 +2965,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_xu_f_w_u16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_mu( @@ -2974,7 +2974,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_mu( @@ -2983,7 +2983,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_xu_f_w_u16m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_mu( @@ -2992,7 +2992,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u16m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_mu( @@ -3001,7 +3001,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return vfncvt_f_x_w_f16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_mu( @@ -3010,7 +3010,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vfncvt_f_x_w_f16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_mu( @@ -3019,7 +3019,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_x_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vfncvt_f_x_w_f16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_mu( @@ -3028,7 +3028,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_x_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return vfncvt_f_x_w_f16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_mu( @@ -3037,7 +3037,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_x_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vfncvt_f_x_w_f16m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_mu( @@ 
-3046,7 +3046,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_mu( @@ -3055,7 +3055,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_mu( @@ -3064,7 +3064,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_xu_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vfncvt_f_xu_w_f16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_mu( @@ -3073,7 +3073,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_xu_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return vfncvt_f_xu_w_f16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_mu( @@ -3082,7 +3082,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_xu_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return 
vfncvt_f_xu_w_f16m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_mu( @@ -3091,7 +3091,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_f_f_w_f16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_mu( @@ -3100,7 +3100,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_mu( @@ -3109,7 +3109,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_f_f_w_f16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_mu( @@ -3118,7 +3118,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_mu( @@ -3127,7 +3127,7 @@ vfloat16mf2_t 
test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_f_f_w_f16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_mu( @@ -3136,7 +3136,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_mu( @@ -3145,7 +3145,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_f_f_w_f16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_mu( @@ -3154,7 +3154,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_mu( @@ -3163,7 +3163,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_f_f_w_f16m4_mu(mask, maskedoff, src, 
vl); + return __riscv_vfncvt_f_f_w_f16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_mu( @@ -3172,7 +3172,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_mu( @@ -3181,7 +3181,7 @@ vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_x_f_w_i32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_mu( @@ -3190,7 +3190,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_mu( @@ -3199,7 +3199,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_x_f_w_i32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_mu( @@ -3208,7 +3208,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_mu( @@ -3217,7 +3217,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_x_f_w_i32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_mu( @@ -3226,7 +3226,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_mu( @@ -3235,7 +3235,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_x_f_w_i32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_x_f_w_i32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_mu( @@ -3244,7 +3244,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_x_f_w_i32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfncvt_xu_f_w_u32mf2_mu( @@ -3253,7 +3253,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_xu_f_w_u32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_mu( @@ -3262,7 +3262,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_mu( @@ -3271,7 +3271,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_xu_f_w_u32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_mu( @@ -3280,7 +3280,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_mu( @@ -3289,7 +3289,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t 
maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_xu_f_w_u32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_mu( @@ -3298,7 +3298,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_mu( @@ -3307,7 +3307,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_xu_f_w_u32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_xu_f_w_u32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_mu( @@ -3316,7 +3316,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rtz_xu_f_w_u32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_mu( @@ -3325,7 +3325,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vfncvt_f_x_w_f32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_mu( @@ -3334,7 +3334,7 @@ vfloat32mf2_t 
test_vfncvt_f_x_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_x_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vfncvt_f_x_w_f32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_mu( @@ -3343,7 +3343,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_x_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vfncvt_f_x_w_f32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_mu( @@ -3352,7 +3352,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_x_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vfncvt_f_x_w_f32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_x_w_f32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_mu( @@ -3361,7 +3361,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vfncvt_f_xu_w_f32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_mu( @@ -3370,7 +3370,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_xu_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vfncvt_f_xu_w_f32m1_mu(mask, maskedoff, src, vl); + return 
__riscv_vfncvt_f_xu_w_f32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_mu( @@ -3379,7 +3379,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_xu_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vfncvt_f_xu_w_f32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_mu( @@ -3388,7 +3388,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_xu_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vfncvt_f_xu_w_f32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_xu_w_f32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_mu( @@ -3397,7 +3397,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_f_f_w_f32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_mu( @@ -3406,7 +3406,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_mu( @@ -3415,7 +3415,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfncvt_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_f_f_w_f32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_mu( @@ -3424,7 +3424,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_mu( @@ -3433,7 +3433,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_f_f_w_f32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_mu( @@ -3442,7 +3442,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_mu( @@ -3451,7 +3451,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_f_f_w_f32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_f_w_f32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfncvt_rod_f_f_w_f32m4_mu( @@ -3460,6 +3460,6 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rod_f_f_w_f32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfncvt_rod_f_f_w_f32m4_mu(mask, maskedoff, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfneg.c index ba972767345e..b3ebf026ec57 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfneg.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfneg_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfneg_v_f16mf4_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f16mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfneg_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfneg_v_f16mf2_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f16mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfneg_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfneg_v_f16m1_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f16m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfneg_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, siz // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m2_t test_vfneg_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfneg_v_f16m2_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f16m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfneg_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfneg_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfneg_v_f16m4_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f16m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfneg_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfneg_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfneg_v_f16m8_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f16m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfneg_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfneg_v_f32mf2_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfneg_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfneg_v_f32m1_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f32m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfneg_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfneg_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return 
vfneg_v_f32m2_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfneg_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfneg_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfneg_v_f32m4_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfneg_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfneg_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfneg_v_f32m8_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfneg_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfneg_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfneg_v_f64m1_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfneg_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfneg_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfneg_v_f64m2_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfneg_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfneg_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfneg_v_f64m4_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: 
@test_vfneg_v_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfneg_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfneg_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfneg_v_f64m8_tu(maskedoff, op1, vl); + return __riscv_vfneg_v_f64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_tum( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfneg_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfneg_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfneg_v_f16mf4_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_tum( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfneg_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfneg_v_f16mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m1_tum( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfneg_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfneg_v_f16m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m2_tum( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfneg_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfneg_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfneg_v_f16m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16m2_tum(mask, maskedoff, op1, vl); } 
// CHECK-RV64-LABEL: @test_vfneg_v_f16m4_tum( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfneg_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfneg_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfneg_v_f16m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m8_tum( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfneg_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfneg_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfneg_v_f16m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tum( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfneg_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfneg_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfneg_v_f32mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m1_tum( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfneg_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfneg_v_f32m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfneg_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfneg_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfneg_v_f32m2_tum(mask, maskedoff, op1, vl); + return 
__riscv_vfneg_v_f32m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m4_tum( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfneg_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfneg_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfneg_v_f32m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m8_tum( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfneg_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfneg_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfneg_v_f32m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m1_tum( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfneg_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfneg_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfneg_v_f64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m2_tum( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfneg_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfneg_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfneg_v_f64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m4_tum( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfneg_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfneg_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return 
vfneg_v_f64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m8_tum( @@ -274,7 +274,7 @@ vfloat64m4_t test_vfneg_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfneg_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfneg_v_f64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_tumu( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfneg_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfneg_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfneg_v_f16mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_tumu( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfneg_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfneg_v_f16mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m1_tumu( @@ -301,7 +301,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfneg_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfneg_v_f16m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m2_tumu( @@ -310,7 +310,7 @@ vfloat16m1_t test_vfneg_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfneg_v_f16m2_tumu(vbool8_t mask, 
vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfneg_v_f16m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m4_tumu( @@ -319,7 +319,7 @@ vfloat16m2_t test_vfneg_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfneg_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfneg_v_f16m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m8_tumu( @@ -328,7 +328,7 @@ vfloat16m4_t test_vfneg_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfneg_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfneg_v_f16m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tumu( @@ -337,7 +337,7 @@ vfloat16m8_t test_vfneg_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfneg_v_f32mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m1_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfneg_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfneg_v_f32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m2_tumu( @@ -355,7 +355,7 @@ vfloat32m1_t test_vfneg_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m2_t test_vfneg_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfneg_v_f32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m4_tumu( @@ -364,7 +364,7 @@ vfloat32m2_t test_vfneg_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfneg_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfneg_v_f32m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m8_tumu( @@ -373,7 +373,7 @@ vfloat32m4_t test_vfneg_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfneg_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfneg_v_f32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m1_tumu( @@ -382,7 +382,7 @@ vfloat32m8_t test_vfneg_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfneg_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfneg_v_f64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m2_tumu( @@ -391,7 +391,7 @@ vfloat64m1_t test_vfneg_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfneg_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfneg_v_f64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m4_tumu( @@ -400,7 +400,7 @@ vfloat64m2_t test_vfneg_v_f64m2_tumu(vbool32_t 
mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfneg_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfneg_v_f64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m8_tumu( @@ -409,7 +409,7 @@ vfloat64m4_t test_vfneg_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfneg_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfneg_v_f64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_mu( @@ -418,7 +418,7 @@ vfloat64m8_t test_vfneg_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfneg_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfneg_v_f16mf4_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_mu( @@ -427,7 +427,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfneg_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfneg_v_f16mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m1_mu( @@ -436,7 +436,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfneg_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfneg_v_f16m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m2_mu( @@ -445,7 +445,7 @@ 
vfloat16m1_t test_vfneg_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfneg_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfneg_v_f16m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m4_mu( @@ -454,7 +454,7 @@ vfloat16m2_t test_vfneg_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfneg_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfneg_v_f16m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f16m8_mu( @@ -463,7 +463,7 @@ vfloat16m4_t test_vfneg_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfneg_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfneg_v_f16m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f16m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_mu( @@ -472,7 +472,7 @@ vfloat16m8_t test_vfneg_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfneg_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfneg_v_f32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m1_mu( @@ -481,7 +481,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfneg_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfneg_v_f32m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: 
@test_vfneg_v_f32m2_mu( @@ -490,7 +490,7 @@ vfloat32m1_t test_vfneg_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfneg_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfneg_v_f32m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m4_mu( @@ -499,7 +499,7 @@ vfloat32m2_t test_vfneg_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfneg_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfneg_v_f32m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f32m8_mu( @@ -508,7 +508,7 @@ vfloat32m4_t test_vfneg_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfneg_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfneg_v_f32m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m1_mu( @@ -517,7 +517,7 @@ vfloat32m8_t test_vfneg_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfneg_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfneg_v_f64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m2_mu( @@ -526,7 +526,7 @@ vfloat64m1_t test_vfneg_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfneg_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfneg_v_f64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f64m2_mu(mask, maskedoff, op1, vl); } // 
CHECK-RV64-LABEL: @test_vfneg_v_f64m4_mu( @@ -535,7 +535,7 @@ vfloat64m2_t test_vfneg_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfneg_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfneg_v_f64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfneg_v_f64m8_mu( @@ -544,6 +544,6 @@ vfloat64m4_t test_vfneg_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfneg_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfneg_v_f64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfneg_v_f64m8_mu(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc.c index 5d8d9b1b64ea..935847ab6ac5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmacc.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmacc_vv_f16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmacc_vf_f16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t 
test_vfnmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmacc_vv_f16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmacc_vf_f16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmacc_vv_f16m1_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmacc_vf_f16m1_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmacc_vv_f16m2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t 
test_vfnmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmacc_vf_f16m2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmacc_vv_f16m4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmacc_vf_f16m4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmacc_vv_f16m8_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmacc_vf_f16m8_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmacc_vf_f16m8_tu(vfloat16m8_t 
vd, _Float16 rs1, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmacc_vf_f32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmacc_vv_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmacc_vf_f32m1_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmacc_vv_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, 
vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmacc_vf_f32m2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmacc_vv_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmacc_vf_f32m4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmacc_vv_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmacc_vf_f32m8_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m1_t test_vfnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmacc_vv_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmacc_vf_f64m1_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmacc_vv_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmacc_vf_f64m2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmacc_vv_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t 
test_vfnmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmacc_vf_f64m4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmacc_vv_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmacc_vf_f64m8_tu(vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmacc_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmacc_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmacc_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Floa // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmacc_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmacc_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmacc_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmacc_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmacc_vv_f16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmacc_vf_f16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmacc_vv_f16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m2_tum(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmacc_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmacc_vf_f16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmacc_vv_f16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmacc_vf_f16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmacc_vv_f16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - 
return vfnmacc_vf_f16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmacc_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmacc_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmacc_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmacc_vv_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmacc_vf_f32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmacc_vv_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmacc_vf_f32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmacc_vv_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmacc_vf_f32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_tum( @@ 
-472,7 +472,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmacc_vf_f32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmacc_vv_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmacc_vf_f64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmacc_vv_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmacc_vf_f64m2_tum(mask, vd, rs1, vs2, vl); + 
return __riscv_vfnmacc_vf_f64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmacc_vv_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmacc_vf_f64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmacc_vv_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmacc_vf_f64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4_tumu(vbool64_t 
mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmacc_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmacc_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfnmacc_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmacc_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfnmacc_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmacc_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfnmacc_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmacc_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_tumu( @@ -598,7 
+598,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmacc_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmacc_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfnmacc_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmacc_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmacc_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmacc_vf_f16m4_tumu(mask, vd, 
rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmacc_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmacc_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfnmacc_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmacc_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmacc_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, floa // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m1_t test_vfnmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmacc_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmacc_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmacc_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmacc_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmacc_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfnmacc_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmacc_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmacc_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return 
vfnmacc_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmacc_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmacc_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmacc_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmacc_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmacc_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmacc_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmacc_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmacc_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfnmacc_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmacc_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfnmacc_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfnmacc_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmacc_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmacc_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfnmacc_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmacc_vv_f16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmacc_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmacc_vf_f16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmacc_vv_f16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfnmacc_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmacc_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return 
vfnmacc_vf_f16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmacc_vv_f16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmacc_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmacc_vf_f16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmacc_vv_f16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmacc_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmacc_vf_f16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfnmacc_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfnmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmacc_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmacc_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmacc_vv_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmacc_vf_f32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmacc_vv_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t 
test_vfnmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmacc_vf_f32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmacc_vv_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmacc_vf_f32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmacc_vf_f32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f32m8_mu(mask, vd, 
rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmacc_vv_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmacc_vf_f64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmacc_vv_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmacc_vf_f64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, 
size_t vl) { - return vfnmacc_vv_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmacc_vf_f64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmacc_vf_f64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmacc_vf_f64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd.c index ca0b6711af8c..da19748f71ec 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmadd.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmadd_vv_f16mf4_tu(vd, 
vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmadd_vf_f16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmadd_vv_f16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmadd_vf_f16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmadd_vv_f16m1_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmadd_vf_f16m1_tu(vd, rs1, vs2, vl); + return 
__riscv_vfnmadd_vf_f16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmadd_vv_f16m2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmadd_vf_f16m2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmadd_vv_f16m4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmadd_vf_f16m4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmadd_vv_f16m8_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m8_tu(vd, vs1, 
vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmadd_vf_f16m8_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmadd_vv_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmadd_vf_f32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmadd_vv_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmadd_vf_f32m1_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m1_tu(vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmadd_vv_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmadd_vf_f32m2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmadd_vv_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmadd_vf_f32m4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmadd_vv_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfnmadd_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmadd_vf_f32m8_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmadd_vv_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmadd_vf_f64m1_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmadd_vv_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmadd_vf_f64m2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_tu( @@ -247,7 +247,7 
@@ vfloat64m2_t test_vfnmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmadd_vv_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmadd_vf_f64m4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmadd_vv_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmadd_vf_f64m8_tu(vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmadd_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_tum( @@ -292,7 
+292,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmadd_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmadd_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmadd_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmadd_vv_f16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return 
vfnmadd_vf_f16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmadd_vv_f16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmadd_vf_f16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmadd_vv_f16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmadd_vf_f16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m8_t test_vfnmadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmadd_vv_f16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmadd_vf_f16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmadd_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmadd_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmadd_vv_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_tum( 
@@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmadd_vf_f32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmadd_vv_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmadd_vf_f32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmadd_vv_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmadd_vf_f32m4_tum(mask, vd, rs1, vs2, vl); + 
return __riscv_vfnmadd_vf_f32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmadd_vv_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmadd_vf_f32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmadd_vv_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmadd_vf_f64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_tum(vbool32_t mask, 
vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmadd_vv_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmadd_vf_f64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmadd_vv_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmadd_vf_f64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmadd_vv_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_tum(vbool8_t 
mask, vfloat64m8_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmadd_vf_f64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmadd_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmadd_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmadd_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmadd_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); + return 
__riscv_vfnmadd_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmadd_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmadd_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmadd_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmadd_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t 
test_vfnmadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmadd_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmadd_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmadd_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfnmadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmadd_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfnmadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmadd_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfnmadd_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmadd_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmadd_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmadd_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmadd_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - 
return vfnmadd_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmadd_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmadd_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmadd_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmadd_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmadd_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmadd_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmadd_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmadd_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmadd_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfnmadd_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmadd_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmadd_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmadd_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmadd_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - 
return vfnmadd_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmadd_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmadd_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmadd_vv_f16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmadd_vf_f16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat16m2_t test_vfnmadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmadd_vv_f16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmadd_vf_f16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmadd_vv_f16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmadd_vf_f16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmadd_vv_f16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_mu( @@ -922,7 +922,7 @@ 
vfloat16m8_t test_vfnmadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmadd_vf_f16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfnmadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmadd_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmadd_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmadd_vv_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmadd_vf_f32m1_mu(mask, vd, rs1, vs2, vl); + return 
__riscv_vfnmadd_vf_f32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmadd_vv_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmadd_vf_f32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmadd_vv_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmadd_vf_f32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, 
vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmadd_vv_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmadd_vf_f32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmadd_vv_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmadd_vf_f64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmadd_vv_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, 
vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmadd_vf_f64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmadd_vv_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmadd_vf_f64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmadd_vv_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmadd_vv_f64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmadd_vf_f64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmadd_vf_f64m8_mu(mask, vd, rs1, vs2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac.c index 082e1e117ede..7d00fcee7401 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsac.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsac_vv_f16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsac_vf_f16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsac_vv_f16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsac_vf_f16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 
rs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsac_vv_f16m1_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsac_vf_f16m1_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsac_vv_f16m2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsac_vf_f16m2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsac_vv_f16m4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat1 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsac_vf_f16m4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsac_vv_f16m8_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsac_vf_f16m8_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsac_vv_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsac_vf_f32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsac_vv_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsac_vf_f32m1_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsac_vv_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsac_vf_f32m2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsac_vv_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_tu(vfloat32m4_t 
vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsac_vf_f32m4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsac_vv_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsac_vf_f32m8_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsac_vv_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsac_vf_f64m1_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, 
size_t vl) { - return vfnmsac_vv_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsac_vf_f64m2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsac_vv_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsac_vf_f64m4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsac_vv_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return 
vfnmsac_vf_f64m8_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsac_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsac_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsac_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsac_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Floa // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsac_vv_f16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsac_vf_f16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsac_vv_f16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsac_vf_f16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsac_vv_f16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_tum( 
@@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsac_vf_f16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsac_vv_f16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsac_vf_f16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmsac_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsac_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsac_vf_f32mf2_tum(mask, vd, 
rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsac_vv_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsac_vf_f32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsac_vv_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsac_vf_f32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t 
test_vfnmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsac_vv_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsac_vf_f32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsac_vv_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsac_vf_f32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsac_vv_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t 
test_vfnmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsac_vf_f64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsac_vv_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsac_vf_f64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsac_vv_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsac_vf_f64m4_tum(mask, vd, rs1, vs2, vl); + return 
__riscv_vfnmsac_vf_f64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsac_vv_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsac_vf_f64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsac_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsac_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vfnmsac_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsac_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsac_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsac_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsac_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsac_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfnmsac_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsac_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsac_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsac_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsac_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - 
return vfnmsac_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfnmsac_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsac_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsac_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsac_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float r // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsac_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsac_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsac_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsac_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsac_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsac_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsac_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsac_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, 
size_t vl) { - return vfnmsac_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsac_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsac_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsac_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsac_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double r // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsac_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsac_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsac_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsac_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsac_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsac_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsac_vv_f16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m1_mu(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsac_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsac_vf_f16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsac_vv_f16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsac_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsac_vf_f16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsac_vv_f16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsac_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return 
vfnmsac_vf_f16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsac_vv_f16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsac_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsac_vf_f16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfnmsac_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsac_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsac_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfnmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsac_vv_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsac_vf_f32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsac_vv_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsac_vf_f32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsac_vv_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t 
test_vfnmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsac_vf_f32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsac_vv_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsac_vf_f32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsac_vv_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsac_vf_f64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m1_mu(mask, 
vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsac_vv_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsac_vf_f64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsac_vv_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsac_vf_f64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t 
vs2, size_t vl) { - return vfnmsac_vv_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsac_vv_f64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsac_vf_f64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsac_vf_f64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub.c index 0365273a1e98..f73d732913b3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfnmsub.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsub_vv_f16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsub_vf_f16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsub_vv_f16mf2_tu(vd, vs1, vs2, vl); + return 
__riscv_vfnmsub_vv_f16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsub_vf_f16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsub_vv_f16m1_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsub_vf_f16m1_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsub_vv_f16m2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsub_vf_f16m2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m2_tu(vd, rs1, 
vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsub_vv_f16m4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsub_vf_f16m4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsub_vv_f16m8_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsub_vf_f16m8_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfnmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsub_vv_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32mf2_tu(vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsub_vf_f32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsub_vv_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsub_vf_f32m1_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsub_vv_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsub_vf_f32m2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfnmsub_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsub_vv_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsub_vf_f32m4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsub_vv_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsub_vf_f32m8_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsub_vv_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_tu( @@ -220,7 
+220,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsub_vf_f64m1_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsub_vv_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsub_vf_f64m2_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsub_vv_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsub_vf_f64m4_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t 
test_vfnmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsub_vv_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsub_vf_f64m8_tu(vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfnmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsub_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsub_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsub_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); 
} // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsub_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsub_vv_f16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsub_vf_f16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsub_vv_f16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, 
size_t vl) { - return vfnmsub_vf_f16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsub_vv_f16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsub_vf_f16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsub_vv_f16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsub_vf_f16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfnmsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, _Float16 // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsub_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsub_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsub_vv_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsub_vf_f32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsub_vv_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfnmsub_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsub_vf_f32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsub_vv_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsub_vf_f32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsub_vv_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return 
vfnmsub_vf_f32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsub_vv_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsub_vf_f64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsub_vv_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsub_vf_f64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m4_t test_vfnmsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsub_vv_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsub_vf_f64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsub_vv_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsub_vf_f64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfnmsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsub_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_tumu( 
@@ -562,7 +562,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsub_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, _Flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsub_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsub_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, _Flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsub_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - 
return vfnmsub_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsub_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsub_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsub_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsub_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, _Float16 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsub_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsub_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfnmsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsub_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsub_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsub_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m1_tumu(mask, vd, vs1, vs2, 
vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsub_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsub_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsub_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsub_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t 
vs2, size_t vl) { - return vfnmsub_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsub_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsub_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsub_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsub_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, double 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsub_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsub_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, double // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsub_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsub_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, double // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsub_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsub_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfnmsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, double r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsub_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfnmsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return vfnmsub_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfnmsub_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfnmsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t 
vs2, size_t vl) { - return vfnmsub_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsub_vv_f16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfnmsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return vfnmsub_vf_f16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsub_vv_f16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfnmsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return vfnmsub_vf_f16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, _Float16 r // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m4_t test_vfnmsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsub_vv_f16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfnmsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return vfnmsub_vf_f16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsub_vv_f16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfnmsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return vfnmsub_vf_f16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfnmsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, _Float16 r // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsub_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_mu( @@ -940,7 
+940,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return vfnmsub_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsub_vv_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return vfnmsub_vf_f32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsub_vv_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return vfnmsub_vf_f32m2_mu(mask, vd, rs1, vs2, vl); + return 
__riscv_vfnmsub_vf_f32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsub_vv_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return vfnmsub_vf_f32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsub_vv_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return vfnmsub_vf_f32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, 
vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsub_vv_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return vfnmsub_vf_f64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsub_vv_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return vfnmsub_vf_f64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsub_vv_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, 
vfloat64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return vfnmsub_vf_f64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, double rs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsub_vv_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfnmsub_vv_f64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return vfnmsub_vf_f64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vfnmsub_vf_f64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrdiv.c index 5a761886fe71..d1eda89938bd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrdiv.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrdiv_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16mf2_t test_vfrdiv_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrdiv_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrdiv_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrdiv_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrdiv_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfrdiv_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, 
_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrdiv_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrdiv_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrdiv_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrdiv_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8_tu(vfloat32m8_t 
maskedoff, vfloat32m8_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrdiv_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, d // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrdiv_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, d // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrdiv_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, d // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrdiv_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_tum( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, d // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrdiv_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfrdiv_vf_f16mf2_tum( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrdiv_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_tum( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrdiv_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_tum( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrdiv_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_tum( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrdiv_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_tum( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrdiv_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t 
maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tum( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfrdiv_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrdiv_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_tum( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrdiv_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrdiv_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_tum( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrdiv_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_tum( @@ -238,7 
+238,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrdiv_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_tum( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrdiv_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_tum( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrdiv_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_tum( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrdiv_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_tum( @@ -274,7 +274,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrdiv_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - 
return vfrdiv_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_tumu( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrdiv_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_tumu( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrdiv_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_tumu( @@ -301,7 +301,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrdiv_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_tumu( @@ -310,7 +310,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrdiv_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_tumu( @@ -319,7 +319,7 @@ 
vfloat16m2_t test_vfrdiv_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrdiv_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_tumu( @@ -328,7 +328,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrdiv_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_tumu( @@ -337,7 +337,7 @@ vfloat16m8_t test_vfrdiv_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrdiv_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_tumu( @@ -355,7 +355,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrdiv_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, 
size_t vl) { - return vfrdiv_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_tumu( @@ -364,7 +364,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrdiv_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_tumu( @@ -373,7 +373,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrdiv_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_tumu( @@ -382,7 +382,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrdiv_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_tumu( @@ -391,7 +391,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrdiv_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_tumu( @@ -400,7 +400,7 @@ vfloat64m2_t 
test_vfrdiv_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrdiv_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_tumu( @@ -409,7 +409,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrdiv_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_mu( @@ -418,7 +418,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrdiv_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_mu( @@ -427,7 +427,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrdiv_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_mu( @@ -436,7 +436,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrdiv_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return 
vfrdiv_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_mu( @@ -445,7 +445,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrdiv_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_mu( @@ -454,7 +454,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrdiv_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_mu( @@ -463,7 +463,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrdiv_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_mu( @@ -472,7 +472,7 @@ vfloat16m8_t test_vfrdiv_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrdiv_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_mu( @@ -481,7 +481,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_mu(vbool64_t mask, 
vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrdiv_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_mu( @@ -490,7 +490,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrdiv_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_mu( @@ -499,7 +499,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrdiv_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_mu( @@ -508,7 +508,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrdiv_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfrdiv_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_mu( @@ -517,7 +517,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrdiv_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfrdiv_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_mu( @@ -526,7 +526,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrdiv_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_mu( @@ -535,7 +535,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrdiv_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_mu( @@ -544,6 +544,6 @@ vfloat64m4_t test_vfrdiv_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrdiv_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfrdiv_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrdiv_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7.c index 74ef076e724e..15dd45fd6e9b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrec7.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrec7_v_f16mf4_tu(maskedoff, op1, vl); + return 
__riscv_vfrec7_v_f16mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfrec7_v_f16mf2_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f16mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrec7_v_f16m1_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrec7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrec7_v_f16m2_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrec7_v_f16m4_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfrec7_v_f16m8_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tu( @@ 
-67,7 +67,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfrec7_v_f32mf2_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrec7_v_f32m1_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrec7_v_f32m2_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfrec7_v_f32m4_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrec7_v_f32m8_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, si // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrec7_v_f64m1_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrec7_v_f64m2_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrec7_v_f64m4_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrec7_v_f64m8_tu(maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_tum( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrec7_v_f16mf4_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_tum( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vfrec7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfrec7_v_f16mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_tum( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrec7_v_f16m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_tum( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrec7_v_f16m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_tum( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrec7_v_f16m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_tum( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfrec7_v_f16m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tum( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t 
maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfrec7_v_f32mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_tum( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrec7_v_f32m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrec7_v_f32m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_tum( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfrec7_v_f32m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_tum( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrec7_v_f32m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_tum( @@ -247,7 +247,7 @@ 
vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrec7_v_f64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_tum( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrec7_v_f64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_tum( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrec7_v_f64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_tum( @@ -274,7 +274,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrec7_v_f64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_tumu( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrec7_v_f16mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16mf4_tumu(mask, maskedoff, op1, 
vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_tumu( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfrec7_v_f16mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_tumu( @@ -301,7 +301,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrec7_v_f16m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_tumu( @@ -310,7 +310,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrec7_v_f16m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_tumu( @@ -319,7 +319,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrec7_v_f16m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_tumu( @@ -328,7 +328,7 @@ vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return 
vfrec7_v_f16m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_tumu( @@ -337,7 +337,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfrec7_v_f32mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrec7_v_f32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_tumu( @@ -355,7 +355,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrec7_v_f32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_tumu( @@ -364,7 +364,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfrec7_v_f32m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_tumu( @@ -373,7 +373,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t 
test_vfrec7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrec7_v_f32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_tumu( @@ -382,7 +382,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrec7_v_f64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_tumu( @@ -391,7 +391,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrec7_v_f64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_tumu( @@ -400,7 +400,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrec7_v_f64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_tumu( @@ -409,7 +409,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrec7_v_f64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_mu( @@ -418,7 +418,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t mask, 
vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrec7_v_f16mf4_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_mu( @@ -427,7 +427,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfrec7_v_f16mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_mu( @@ -436,7 +436,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrec7_v_f16m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_mu( @@ -445,7 +445,7 @@ vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrec7_v_f16m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_mu( @@ -454,7 +454,7 @@ vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrec7_v_f16m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_mu( @@ -463,7 +463,7 @@ 
vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfrec7_v_f16m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f16m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_mu( @@ -472,7 +472,7 @@ vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfrec7_v_f32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_mu( @@ -481,7 +481,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrec7_v_f32m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_mu( @@ -490,7 +490,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrec7_v_f32m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_mu( @@ -499,7 +499,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfrec7_v_f32m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: 
@test_vfrec7_v_f32m8_mu( @@ -508,7 +508,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrec7_v_f32m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_mu( @@ -517,7 +517,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrec7_v_f64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_mu( @@ -526,7 +526,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrec7_v_f64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_mu( @@ -535,7 +535,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrec7_v_f64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_mu( @@ -544,6 +544,6 @@ vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrec7_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrec7_v_f64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfrec7_v_f64m8_mu(mask, 
maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredmax.c index 4cb1d044a495..b4c7838a73c9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredmax.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_tu( @@ -22,7 +22,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t 
vl) { - return vfredmax_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_tu( @@ -94,7 +94,7 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_tu( @@ -103,7 +103,7 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_tu( @@ -130,7 +130,7 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_tu( @@ -139,7 +139,7 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_tum( @@ -148,7 +148,7 @@ vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_tum( @@ -157,7 +157,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t 
maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_tum( @@ -166,7 +166,7 @@ vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_tum( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_tum( @@ -184,7 +184,7 @@ vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_tum( @@ -193,7 +193,7 @@ vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t 
vector, vfloat16m1_t scalar, size_t vl) { - return vfredmax_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tum( @@ -202,7 +202,7 @@ vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_tum( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_tum( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_tum( @@ -229,7 +229,7 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, 
vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_tum( @@ -238,7 +238,7 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmax_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_tum( @@ -247,7 +247,7 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_tum( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_tum( @@ -265,7 +265,7 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, 
size_t vl) { - return vfredmax_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_tum( @@ -274,6 +274,6 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmax_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmax_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredmin.c index 7109b579b261..462c03ba9d62 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredmin.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_tu( @@ -22,7 +22,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16m1_t 
test_vfredmin_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_tu( @@ -94,7 +94,7 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_tu( @@ -103,7 +103,7 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return 
vfredmin_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_tu( @@ -130,7 +130,7 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_tu( @@ -139,7 +139,7 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_tum( @@ -148,7 +148,7 @@ vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_tum( @@ -157,7 +157,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_tum( @@ -166,7 +166,7 @@ vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_tum( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_tum( @@ -184,7 +184,7 @@ vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_tum( @@ -193,7 +193,7 @@ vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredmin_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tum( @@ -202,7 +202,7 @@ vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_tum( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vfredmin_vs_f32m2_f32m1_tum( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_tum( @@ -229,7 +229,7 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_tum( @@ -238,7 +238,7 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredmin_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_tum( @@ -247,7 +247,7 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vfredmin_vs_f64m2_f64m1_tum( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_tum( @@ -265,7 +265,7 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_tum( @@ -274,6 +274,6 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredmin_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredmin_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredosum.c index 86ef5038d854..ed40774a50ad 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredosum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredosum.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tu(vfloat16m1_t 
maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_tu( @@ -22,7 +22,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); 
+ return __riscv_vfredosum_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_tu( @@ 
-94,7 +94,7 @@ vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_tu( @@ -103,7 +103,7 @@ vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_tu( @@ -130,7 +130,7 @@ vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_tu( @@ -139,7 +139,7 @@ vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_tum( @@ -148,7 +148,7 @@ vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_tum( @@ -157,7 +157,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_tum( @@ -166,7 +166,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tum(vbool16_t 
mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_tum( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_tum( @@ -184,7 +184,7 @@ vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_tum( @@ -193,7 +193,7 @@ vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredosum_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tum( @@ -202,7 +202,7 @@ vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, 
vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_tum( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_tum( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_tum( @@ -229,7 +229,7 @@ vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_tum( @@ -238,7 +238,7 @@ vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tum(vbool4_t mask, 
vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredosum_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_tum( @@ -247,7 +247,7 @@ vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_tum( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_tum( @@ -265,7 +265,7 @@ vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_tum( @@ -274,6 +274,6 @@ vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tum(vbool8_t mask, 
vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredosum_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredosum_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredusum.c index 10ab21c4259f..43c259afce5f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredusum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfredusum.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_tu( @@ -22,7 +22,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_tu( @@ -40,7 +40,7 @@ 
vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfredusum_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_tu( @@ -94,7 +94,7 @@ vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_tu( @@ -103,7 +103,7 @@ vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return 
vfredusum_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_tu( @@ -130,7 +130,7 @@ vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_tu( @@ -139,7 +139,7 @@ vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_tum( @@ -148,7 +148,7 @@ vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return 
__riscv_vfredusum_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_tum( @@ -157,7 +157,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_tum( @@ -166,7 +166,7 @@ vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_tum( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_tum( @@ -184,7 +184,7 @@ vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return 
__riscv_vfredusum_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_tum( @@ -193,7 +193,7 @@ vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return vfredusum_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tum( @@ -202,7 +202,7 @@ vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_tum( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_tum( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return 
__riscv_vfredusum_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_tum( @@ -229,7 +229,7 @@ vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_tum( @@ -238,7 +238,7 @@ vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfredusum_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_tum( @@ -247,7 +247,7 @@ vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_tum( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return 
__riscv_vfredusum_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_tum( @@ -265,7 +265,7 @@ vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_tum( @@ -274,6 +274,6 @@ vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfredusum_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfredusum_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsqrt7.c index 7649d5be503c..649d9895f9f5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsqrt7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsqrt7.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrsqrt7_v_f16mf4_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tu(vfloat16mf2_t maskedoff, 
vfloat16mf2_t op1, size_t vl) { - return vfrsqrt7_v_f16mf2_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsqrt7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrsqrt7_v_f16m1_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsqrt7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrsqrt7_v_f16m2_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsqrt7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrsqrt7_v_f16m4_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfrsqrt7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsqrt7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfrsqrt7_v_f16m8_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfrsqrt7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return 
vfrsqrt7_v_f32mf2_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsqrt7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrsqrt7_v_f32m1_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsqrt7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrsqrt7_v_f32m2_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsqrt7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfrsqrt7_v_f32m4_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsqrt7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrsqrt7_v_f32m8_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsqrt7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrsqrt7_v_f64m1_tu(maskedoff, op1, vl); + return 
__riscv_vfrsqrt7_v_f64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsqrt7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrsqrt7_v_f64m2_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsqrt7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrsqrt7_v_f64m4_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsqrt7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrsqrt7_v_f64m8_tu(maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_tum( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrsqrt7_v_f16mf4_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_tum( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfrsqrt7_v_f16mf2_tum(mask, maskedoff, op1, vl); + 
return __riscv_vfrsqrt7_v_f16mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_tum( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsqrt7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrsqrt7_v_f16m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_tum( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsqrt7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrsqrt7_v_f16m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_tum( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsqrt7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrsqrt7_v_f16m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_tum( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfrsqrt7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsqrt7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfrsqrt7_v_f16m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tum( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfrsqrt7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tum(vbool64_t mask, 
vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfrsqrt7_v_f32mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_tum( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsqrt7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrsqrt7_v_f32m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsqrt7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrsqrt7_v_f32m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_tum( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsqrt7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfrsqrt7_v_f32m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_tum( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsqrt7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrsqrt7_v_f32m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_tum( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t 
maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsqrt7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrsqrt7_v_f64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_tum( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsqrt7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrsqrt7_v_f64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_tum( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsqrt7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrsqrt7_v_f64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_tum( @@ -274,7 +274,7 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsqrt7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrsqrt7_v_f64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_tumu( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrsqrt7_v_f16mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: 
@test_vfrsqrt7_v_f16mf2_tumu( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfrsqrt7_v_f16mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_tumu( @@ -301,7 +301,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsqrt7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrsqrt7_v_f16m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_tumu( @@ -310,7 +310,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsqrt7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrsqrt7_v_f16m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_tumu( @@ -319,7 +319,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsqrt7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrsqrt7_v_f16m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_tumu( @@ -328,7 +328,7 @@ vfloat16m4_t test_vfrsqrt7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsqrt7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return 
vfrsqrt7_v_f16m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_tumu( @@ -337,7 +337,7 @@ vfloat16m8_t test_vfrsqrt7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfrsqrt7_v_f32mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsqrt7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrsqrt7_v_f32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_tumu( @@ -355,7 +355,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsqrt7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrsqrt7_v_f32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_tumu( @@ -364,7 +364,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsqrt7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfrsqrt7_v_f32m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_tumu( @@ -373,7 +373,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, v // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m8_t test_vfrsqrt7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrsqrt7_v_f32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_tumu( @@ -382,7 +382,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsqrt7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrsqrt7_v_f64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_tumu( @@ -391,7 +391,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsqrt7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrsqrt7_v_f64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_tumu( @@ -400,7 +400,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsqrt7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrsqrt7_v_f64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_tumu( @@ -409,7 +409,7 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsqrt7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrsqrt7_v_f64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_mu( @@ -418,7 
+418,7 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsqrt7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrsqrt7_v_f16mf4_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_mu( @@ -427,7 +427,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsqrt7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfrsqrt7_v_f16mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_mu( @@ -436,7 +436,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsqrt7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrsqrt7_v_f16m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_mu( @@ -445,7 +445,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsqrt7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrsqrt7_v_f16m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_mu( @@ -454,7 +454,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsqrt7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrsqrt7_v_f16m4_mu(mask, maskedoff, op1, vl); + return 
__riscv_vfrsqrt7_v_f16m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_mu( @@ -463,7 +463,7 @@ vfloat16m4_t test_vfrsqrt7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsqrt7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfrsqrt7_v_f16m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f16m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_mu( @@ -472,7 +472,7 @@ vfloat16m8_t test_vfrsqrt7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsqrt7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfrsqrt7_v_f32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_mu( @@ -481,7 +481,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsqrt7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrsqrt7_v_f32m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_mu( @@ -490,7 +490,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsqrt7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrsqrt7_v_f32m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_mu( @@ -499,7 +499,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsqrt7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, 
vfloat32m4_t op1, size_t vl) { - return vfrsqrt7_v_f32m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_mu( @@ -508,7 +508,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsqrt7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrsqrt7_v_f32m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_mu( @@ -517,7 +517,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsqrt7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrsqrt7_v_f64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_mu( @@ -526,7 +526,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsqrt7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrsqrt7_v_f64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_mu( @@ -535,7 +535,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsqrt7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrsqrt7_v_f64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_mu( @@ -544,6 +544,6 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat64m8_t test_vfrsqrt7_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrsqrt7_v_f64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfrsqrt7_v_f64m8_mu(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsub.c index ca99198611b9..784ae88959a7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfrsub.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsub_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsub_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsub_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsub_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t 
op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsub_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsub_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfrsub_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t 
test_vfrsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, d // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, d // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, d // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_tum( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfrsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, d // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_tum( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_tum( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfrsub_vf_f16m2_tum( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_tum( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_tum( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tum( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfrsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_tum( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t 
maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_tum( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_tum( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_tum( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_tum( @@ -256,7 +256,7 @@ 
vfloat64m1_t test_vfrsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_tum( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_tum( @@ -274,7 +274,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_tumu( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfrsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_tumu( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, 
size_t vl) { - return vfrsub_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_tumu( @@ -301,7 +301,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_tumu( @@ -310,7 +310,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_tumu( @@ -319,7 +319,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_tumu( @@ -328,7 +328,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_tumu( @@ -337,7 +337,7 @@ 
vfloat16m8_t test_vfrsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_tumu( @@ -355,7 +355,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_tumu( @@ -364,7 +364,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_tumu( @@ -373,7 +373,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t 
vl) { - return vfrsub_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_tumu( @@ -382,7 +382,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_tumu( @@ -391,7 +391,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_tumu( @@ -400,7 +400,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_tumu( @@ -409,7 +409,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_mu( @@ -418,7 +418,7 @@ vfloat64m8_t 
test_vfrsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfrsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_mu( @@ -427,7 +427,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfrsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_mu( @@ -436,7 +436,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfrsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_mu( @@ -445,7 +445,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfrsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_mu( @@ -454,7 +454,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfrsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return 
vfrsub_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_mu( @@ -463,7 +463,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfrsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_mu( @@ -472,7 +472,7 @@ vfloat16m8_t test_vfrsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_mu( @@ -481,7 +481,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_mu( @@ -490,7 +490,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_mu( @@ -499,7 +499,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t 
maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_mu( @@ -508,7 +508,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfrsub_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_mu( @@ -517,7 +517,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_mu( @@ -526,7 +526,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_mu( @@ -535,7 +535,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfrsub_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_mu( @@ -544,6 +544,6 @@ vfloat64m4_t test_vfrsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfrsub_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfrsub_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnj.c index 151f13692ce0..375237f75319 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnj.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnj_vv_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnj_vv_f16mf2_tu(maskedoff, op1, op2, vl); + return 
__riscv_vfsgnj_vv_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnj_vv_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnj_vv_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return 
vfsgnj_vf_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnj_vv_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnj_vv_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2_tu(vfloat32mf2_t maskedoff, 
vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnj_vv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnj_vv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnj_vv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m2_t test_vfsgnj_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnj_vv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnj_vv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t 
op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnj_vv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, d // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnj_vv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, d // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnj_vv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t 
test_vfsgnj_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, d // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnj_vv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, d // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnj_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfsgnj_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnj_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnj_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vf // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnj_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnj_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnj_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfsgnj_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnj_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnj_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnj_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnj_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfsgnj_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnj_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnj_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m2_t test_vfsgnj_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnj_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnj_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnj_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfsgnj_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnj_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnj_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2_tumu(vbool32_t mask, 
vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnj_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnj_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return 
vfsgnj_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnj_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfsgnj_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnj_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t 
test_vfsgnj_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnj_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnj_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, 
vfloat32m2_t op2, size_t vl) { - return vfsgnj_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnj_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnj_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_tumu( @@ -742,7 
+742,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnj_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnj_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t 
op1, double op2, size_t vl) { - return vfsgnj_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnj_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnj_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_mu( @@ -823,7 
+823,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnj_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfsgnj_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnj_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfsgnj_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnj_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfsgnj_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnj_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfsgnj_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, 
vfloat16m1_t op2, size_t vl) { - return vfsgnj_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfsgnj_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnj_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfsgnj_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnj_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfsgnj_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnj_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfsgnj_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnj_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t 
test_vfsgnj_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnj_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfsgnj_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnj_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfsgnj_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnj_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfsgnj_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnj_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return 
vfsgnj_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnj_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnj_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2_mu(vbool16_t mask, 
vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnj_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnj_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnj_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnj_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vfsgnj_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnj_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnj_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnj_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfsgnj_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnj_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnj_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjn.c index f29dce061088..f5b7cb14d82b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjn.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjn.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjn_vv_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t o // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjn_vv_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjn_vv_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_tu( @@ -67,7 +67,7 @@ 
vfloat16m1_t test_vfsgnjn_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjn_vv_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjn_vv_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjn_vv_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m8_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjn_vv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjn_vv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m1_tu(maskedoff, op1, 
op2, vl); + return __riscv_vfsgnjn_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjn_vv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjn_vv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t 
op2, size_t vl) { - return vfsgnjn_vv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjn_vv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjn_vv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfsgnjn_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjn_vv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjn_vv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8_tu(vfloat64m8_t maskedoff, 
vfloat64m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjn_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjn_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - 
return vfsgnjn_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjn_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjn_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t 
test_vfsgnjn_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjn_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjn_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, 
float op2, size_t vl) { - return vfsgnjn_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjn_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjn_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_tum( @@ -445,7 
+445,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjn_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjn_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, 
vfloat64m1_t op2, size_t vl) { - return vfsgnjn_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjn_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjn_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_tum( @@ 
-526,7 +526,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjn_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjn_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t 
maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjn_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjn_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m1_tumu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjn_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjn_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m8_t test_vfsgnjn_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjn_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjn_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjn_vv_f32m1_tumu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vfsgnjn_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjn_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjn_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4_tumu(vbool8_t mask, 
vfloat32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjn_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjn_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return 
vfsgnjn_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjn_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjn_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_tumu( @@ -805,7 +805,7 @@ 
vfloat64m4_t test_vfsgnjn_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjn_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjn_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfsgnjn_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjn_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfsgnjn_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, 
vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjn_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfsgnjn_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjn_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfsgnjn_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjn_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfsgnjn_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjn_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfsgnjn_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjn_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfsgnjn_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfsgnjn_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjn_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfsgnjn_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjn_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfsgnjn_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjn_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfsgnjn_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjn_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfsgnjn_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjn_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t 
maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfsgnjn_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjn_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjn_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfsgnjn_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjn_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjn_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t 
maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjn_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjn_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjn_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfsgnjn_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjn_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjn_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8_mu(vbool8_t mask, 
vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjn_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjn_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjx.c index 3f7b4d782261..5c2adf589013 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjx.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsgnjx.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjx_vv_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjx_vv_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tu(vfloat16mf2_t maskedoff, 
vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjx_vv_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjx_vv_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m4_t test_vfsgnjx_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjx_vv_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjx_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjx_vv_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfsgnjx_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjx_vv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t 
test_vfsgnjx_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjx_vv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjx_vv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m2_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjx_vv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjx_vv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjx_vv_f64m1_tu(maskedoff, op1, op2, vl); + 
return __riscv_vfsgnjx_vv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjx_vv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjx_vv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { 
- return vfsgnjx_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjx_vv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjx_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t 
maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjx_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjx_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return 
vfsgnjx_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjx_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjx_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjx_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjx_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t 
test_vfsgnjx_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfsgnjx_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjx_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjx_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, 
float op2, size_t vl) { - return vfsgnjx_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjx_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjx_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_tum( @@ -463,7 +463,7 
@@ vfloat32m4_t test_vfsgnjx_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjx_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjx_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, 
vfloat64m2_t op2, size_t vl) { - return vfsgnjx_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjx_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjx_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_tum( @@ 
-544,7 +544,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjx_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjx_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vfsgnjx_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjx_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjx_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfsgnjx_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjx_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjx_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjx_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjx_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfsgnjx_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t 
maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjx_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjx_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return 
vfsgnjx_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjx_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjx_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t 
test_vfsgnjx_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjx_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjx_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double 
op2, size_t vl) { - return vfsgnjx_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjx_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjx_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_mu( @@ 
-823,7 +823,7 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjx_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfsgnjx_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsgnjx_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfsgnjx_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjx_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfsgnjx_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsgnjx_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfsgnjx_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t 
maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjx_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfsgnjx_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsgnjx_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfsgnjx_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjx_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_mu( @@ -886,7 +886,7 @@ vfloat16m2_t test_vfsgnjx_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsgnjx_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfsgnjx_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjx_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjx_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfsgnjx_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfsgnjx_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsgnjx_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfsgnjx_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjx_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfsgnjx_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsgnjx_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfsgnjx_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjx_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2_mu(vbool64_t mask, 
vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjx_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjx_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfsgnjx_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjx_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjx_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t 
maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjx_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjx_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjx_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfsgnjx_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjx_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjx_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsgnjx_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1down.c index 7f948a9a854e..359ccd2e4ea2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1down.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1down_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf4_tu(maskedoff, src, value, vl); + return 
__riscv_vfslide1down_vf_f16mf4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1down_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf2_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1down_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m1_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1down_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m2_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1down_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m4_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m8_t test_vfslide1down_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m8_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfslide1down_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1down_vf_f32mf2_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1down_vf_f32m1_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1down_vf_f32m2_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1down_vf_f32m4_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m4_tu(maskedoff, src, value, vl); } // 
CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1down_vf_f32m8_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1down_vf_f64m1_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1down_vf_f64m2_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1down_vf_f64m4_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8_tu(vfloat64m8_t maskedoff, 
vfloat64m8_t src, double value, size_t vl) { - return vfslide1down_vf_f64m8_tu(maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_tum( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfslide1down_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1down_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf4_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16mf4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_tum( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1down_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_tum( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1down_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_tum( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1down_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m2_tum(mask, maskedoff, src, value, vl); + 
return __riscv_vfslide1down_vf_f16m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_tum( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1down_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_tum( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfslide1down_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tum( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfslide1down_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1down_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1down_vf_f32mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_tum( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1down_vf_f32m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: 
@test_vfslide1down_vf_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1down_vf_f32m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_tum( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1down_vf_f32m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_tum( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1down_vf_f32m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_tum( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1down_vf_f64m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_tum( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t 
maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1down_vf_f64m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_tum( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1down_vf_f64m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_tum( @@ -274,7 +274,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { - return vfslide1down_vf_f64m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_tumu( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfslide1down_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1down_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16mf4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_tumu( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1down_vf_f16mf2_tumu(vbool32_t mask, 
vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_tumu( @@ -301,7 +301,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1down_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_tumu( @@ -310,7 +310,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1down_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_tumu( @@ -319,7 +319,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1down_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_tumu( @@ -328,7 +328,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfslide1down_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return 
vfslide1down_vf_f16m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_tumu( @@ -337,7 +337,7 @@ vfloat16m8_t test_vfslide1down_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1down_vf_f32mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1down_vf_f32m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_tumu( @@ -355,7 +355,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1down_vf_f32m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_tumu( @@ -364,7 +364,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1down_vf_f32m4_tumu(mask, maskedoff, src, value, vl); + return 
__riscv_vfslide1down_vf_f32m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_tumu( @@ -373,7 +373,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1down_vf_f32m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_tumu( @@ -382,7 +382,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1down_vf_f64m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_tumu( @@ -391,7 +391,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1down_vf_f64m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_tumu( @@ -400,7 +400,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1down_vf_f64m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: 
@test_vfslide1down_vf_f64m8_tumu( @@ -409,7 +409,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { - return vfslide1down_vf_f64m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_mu( @@ -418,7 +418,7 @@ vfloat64m8_t test_vfslide1down_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1down_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf4_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16mf4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_mu( @@ -427,7 +427,7 @@ vfloat16mf4_t test_vfslide1down_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1down_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_mu( @@ -436,7 +436,7 @@ vfloat16mf2_t test_vfslide1down_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1down_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_mu( @@ -445,7 +445,7 @@ vfloat16m1_t test_vfslide1down_vf_f16m1_mu(vbool16_t mask, 
vfloat16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1down_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_mu( @@ -454,7 +454,7 @@ vfloat16m2_t test_vfslide1down_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1down_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_mu( @@ -463,7 +463,7 @@ vfloat16m4_t test_vfslide1down_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfslide1down_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f16m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_mu( @@ -472,7 +472,7 @@ vfloat16m8_t test_vfslide1down_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1down_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1down_vf_f32mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_mu( @@ -481,7 +481,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t 
maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1down_vf_f32m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_mu( @@ -490,7 +490,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1down_vf_f32m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_mu( @@ -499,7 +499,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1down_vf_f32m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_mu( @@ -508,7 +508,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1down_vf_f32m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f32m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_mu( @@ -517,7 +517,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1down_vf_f64m1_mu(mask, maskedoff, src, value, vl); + return 
__riscv_vfslide1down_vf_f64m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_mu( @@ -526,7 +526,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1down_vf_f64m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_mu( @@ -535,7 +535,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1down_vf_f64m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_mu( @@ -544,6 +544,6 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { - return vfslide1down_vf_f64m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1down_vf_f64m8_mu(mask, maskedoff, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1up.c index 282dcf10d2ce..390b90b44791 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfslide1up.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1up_vf_f16mf4_tu(vfloat16mf4_t 
maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf4_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16mf4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1up_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf2_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1up_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m1_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1up_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m2_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1up_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m4_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t 
test_vfslide1up_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfslide1up_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m8_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfslide1up_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1up_vf_f32mf2_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1up_vf_f32m1_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1up_vf_f32m2_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1up_vf_f32m4_tu(maskedoff, src, value, vl); + return 
__riscv_vfslide1up_vf_f32m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1up_vf_f32m8_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1up_vf_f64m1_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1up_vf_f64m2_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1up_vf_f64m4_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vfslide1up_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { - return vfslide1up_vf_f64m8_tu(maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_tum( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfslide1up_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1up_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf4_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16mf4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_tum( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1up_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_tum( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1up_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_tum( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1up_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m2_tum(mask, maskedoff, src, 
value, vl); + return __riscv_vfslide1up_vf_f16m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_tum( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1up_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_tum( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfslide1up_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tum( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfslide1up_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1up_vf_f32mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_tum( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1up_vf_f32m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_tum( @@ -220,7 
+220,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1up_vf_f32m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_tum( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1up_vf_f32m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_tum( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1up_vf_f32m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_tum( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1up_vf_f64m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_tum( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfslide1up_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1up_vf_f64m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_tum( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1up_vf_f64m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_tum( @@ -274,7 +274,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1up_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { - return vfslide1up_vf_f64m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_tumu( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfslide1up_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1up_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16mf4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_tumu( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1up_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return 
vfslide1up_vf_f16mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_tumu( @@ -301,7 +301,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1up_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_tumu( @@ -310,7 +310,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1up_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_tumu( @@ -319,7 +319,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1up_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_tumu( @@ -328,7 +328,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfslide1up_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m8_tumu(mask, maskedoff, src, value, vl); } 
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_tumu( @@ -337,7 +337,7 @@ vfloat16m8_t test_vfslide1up_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1up_vf_f32mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1up_vf_f32m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_tumu( @@ -355,7 +355,7 @@ vfloat32m1_t test_vfslide1up_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1up_vf_f32m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_tumu( @@ -364,7 +364,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1up_vf_f32m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_tumu( @@ -373,7 +373,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4_tumu(vbool8_t mask, 
vfloat32m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1up_vf_f32m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_tumu( @@ -382,7 +382,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1up_vf_f64m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_tumu( @@ -391,7 +391,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1up_vf_f64m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_tumu( @@ -400,7 +400,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1up_vf_f64m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_tumu( @@ -409,7 +409,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1up_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, 
vfloat64m8_t src, double value, size_t vl) { - return vfslide1up_vf_f64m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_mu( @@ -418,7 +418,7 @@ vfloat64m8_t test_vfslide1up_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfslide1up_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf4_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16mf4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_mu( @@ -427,7 +427,7 @@ vfloat16mf4_t test_vfslide1up_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfslide1up_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_mu( @@ -436,7 +436,7 @@ vfloat16mf2_t test_vfslide1up_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfslide1up_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_mu( @@ -445,7 +445,7 @@ vfloat16m1_t test_vfslide1up_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfslide1up_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m2_mu(mask, maskedoff, src, value, vl); + return 
__riscv_vfslide1up_vf_f16m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_mu( @@ -454,7 +454,7 @@ vfloat16m2_t test_vfslide1up_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfslide1up_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_mu( @@ -463,7 +463,7 @@ vfloat16m4_t test_vfslide1up_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfslide1up_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f16m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_mu( @@ -472,7 +472,7 @@ vfloat16m8_t test_vfslide1up_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1up_vf_f32mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_mu( @@ -481,7 +481,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1up_vf_f32m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_mu( @@ -490,7 +490,7 @@ vfloat32m1_t 
test_vfslide1up_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1up_vf_f32m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_mu( @@ -499,7 +499,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1up_vf_f32m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_mu( @@ -508,7 +508,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1up_vf_f32m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f32m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_mu( @@ -517,7 +517,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1up_vf_f64m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_mu( @@ -526,7 +526,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t 
maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1up_vf_f64m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_mu( @@ -535,7 +535,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1up_vf_f64m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_mu( @@ -544,6 +544,6 @@ vfloat64m4_t test_vfslide1up_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1up_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { - return vfslide1up_vf_f64m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vfslide1up_vf_f64m8_mu(mask, maskedoff, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsqrt.c index d3fe27d458ea..47ba0c31993a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsqrt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsqrt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfsqrt_v_f16mf4_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfsqrt_v_f16mf2_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfsqrt_v_f16m1_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfsqrt_v_f16m2_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfsqrt_v_f16m4_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfsqrt_v_f16m8_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return 
vfsqrt_v_f32mf2_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfsqrt_v_f32m1_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfsqrt_v_f32m2_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfsqrt_v_f32m4_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfsqrt_v_f32m8_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfsqrt_v_f64m1_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m1_tu(maskedoff, op1, vl); } // 
CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfsqrt_v_f64m2_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfsqrt_v_f64m4_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfsqrt_v_f64m8_tu(maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_tum( @@ -148,7 +148,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfsqrt_v_f16mf4_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_tum( @@ -157,7 +157,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfsqrt_v_f16mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: 
@test_vfsqrt_v_f16m1_tum( @@ -166,7 +166,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfsqrt_v_f16m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_tum( @@ -175,7 +175,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfsqrt_v_f16m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_tum( @@ -184,7 +184,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfsqrt_v_f16m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_tum( @@ -193,7 +193,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfsqrt_v_f16m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tum( @@ -202,7 +202,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfsqrt_v_f32mf2_tum(mask, maskedoff, op1, vl); + return 
__riscv_vfsqrt_v_f32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_tum( @@ -211,7 +211,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfsqrt_v_f32m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfsqrt_v_f32m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_tum( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfsqrt_v_f32m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_tum( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfsqrt_v_f32m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_tum( @@ -247,7 +247,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - 
return vfsqrt_v_f64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_tum( @@ -256,7 +256,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfsqrt_v_f64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_tum( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsqrt_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfsqrt_v_f64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_tum( @@ -274,7 +274,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfsqrt_v_f64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_tumu( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfsqrt_v_f16mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_tumu( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t 
test_vfsqrt_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfsqrt_v_f16mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_tumu( @@ -301,7 +301,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfsqrt_v_f16m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_tumu( @@ -310,7 +310,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfsqrt_v_f16m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_tumu( @@ -319,7 +319,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfsqrt_v_f16m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_tumu( @@ -328,7 +328,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfsqrt_v_f16m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_tumu( @@ -337,7 +337,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t 
mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfsqrt_v_f32mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfsqrt_v_f32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_tumu( @@ -355,7 +355,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfsqrt_v_f32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_tumu( @@ -364,7 +364,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfsqrt_v_f32m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_tumu( @@ -373,7 +373,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfsqrt_v_f32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: 
@test_vfsqrt_v_f64m1_tumu( @@ -382,7 +382,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfsqrt_v_f64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_tumu( @@ -391,7 +391,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfsqrt_v_f64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_tumu( @@ -400,7 +400,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfsqrt_v_f64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_tumu( @@ -409,7 +409,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfsqrt_v_f64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_mu( @@ -418,7 +418,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfsqrt_v_f16mf4_mu(mask, maskedoff, op1, vl); + 
return __riscv_vfsqrt_v_f16mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_mu( @@ -427,7 +427,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfsqrt_v_f16mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_mu( @@ -436,7 +436,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfsqrt_v_f16m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_mu( @@ -445,7 +445,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfsqrt_v_f16m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_mu( @@ -454,7 +454,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfsqrt_v_f16m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_mu( @@ -463,7 +463,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsqrt_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return 
vfsqrt_v_f16m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f16m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_mu( @@ -472,7 +472,7 @@ vfloat16m8_t test_vfsqrt_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfsqrt_v_f32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_mu( @@ -481,7 +481,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfsqrt_v_f32m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_mu( @@ -490,7 +490,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfsqrt_v_f32m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_mu( @@ -499,7 +499,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfsqrt_v_f32m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_mu( @@ -508,7 +508,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t mask, vfloat32m8_t 
maskedoff, vfloat32m8_t op1, size_t vl) { - return vfsqrt_v_f32m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_mu( @@ -517,7 +517,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfsqrt_v_f64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_mu( @@ -526,7 +526,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfsqrt_v_f64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_mu( @@ -535,7 +535,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfsqrt_v_f64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_mu( @@ -544,6 +544,6 @@ vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsqrt_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfsqrt_v_f64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vfsqrt_v_f64m8_mu(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsub.c index 
eca6cc2b477d..ab5aaf04f2ba 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfsub.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsub_vv_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsub_vv_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, 
vfloat16m1_t op2, size_t vl) { - return vfsub_vv_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vfsub_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vfsub_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsub_vv_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vfsub_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vfsub_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsub_vv_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vfsub_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vf_f16m4_tu(vfloat16m4_t 
maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vfsub_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsub_vv_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vfsub_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfsub_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsub_vv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsub_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m1_t test_vfsub_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsub_vv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfsub_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsub_vf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsub_vv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vfsub_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsub_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vfsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsub_vv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vfsub_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsub_vf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vfsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsub_vv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vfsub_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsub_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vfsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsub_vv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfsub_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsub_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfsub_vf_f64m1_tu(vfloat64m1_t maskedoff, 
vfloat64m1_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsub_vv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfsub_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsub_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsub_vv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vfsub_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsub_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vfsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsub_vv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t 
test_vfsub_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsub_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_tum( @@ -283,7 +283,7 @@ vfloat64m8_t test_vfsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsub_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_tum( @@ -292,7 +292,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_tum( @@ -301,7 +301,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsub_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_tum( @@ -310,7 +310,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return 
vfsub_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_tum( @@ -319,7 +319,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsub_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_tum( @@ -328,7 +328,7 @@ vfloat16m1_t test_vfsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_tum( @@ -337,7 +337,7 @@ vfloat16m1_t test_vfsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsub_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_tum( @@ -346,7 +346,7 @@ vfloat16m2_t test_vfsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_tum( @@ -355,7 +355,7 @@ vfloat16m2_t test_vfsub_vf_f16m2_tum(vbool8_t mask, 
vfloat16m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsub_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_tum( @@ -364,7 +364,7 @@ vfloat16m4_t test_vfsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_tum( @@ -373,7 +373,7 @@ vfloat16m4_t test_vfsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsub_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_tum( @@ -382,7 +382,7 @@ vfloat16m8_t test_vfsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tum( @@ -391,7 +391,7 @@ vfloat16m8_t test_vfsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsub_vv_f32mf2_tum(mask, maskedoff, op1, 
op2, vl); + return __riscv_vfsub_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tum( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsub_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_tum( @@ -409,7 +409,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsub_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_tum( @@ -418,7 +418,7 @@ vfloat32m1_t test_vfsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsub_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m1_t test_vfsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsub_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsub_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsub_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsub_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsub_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_tum( @@ -472,7 +472,7 @@ vfloat32m8_t test_vfsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsub_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m8_tum(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m8_t test_vfsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsub_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vfsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsub_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_tum( @@ -499,7 +499,7 @@ vfloat64m1_t test_vfsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsub_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_tum( @@ -508,7 +508,7 @@ vfloat64m2_t test_vfsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsub_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_tum( @@ -517,7 +517,7 @@ vfloat64m2_t test_vfsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4_tum(vbool16_t mask, 
vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsub_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_tum( @@ -526,7 +526,7 @@ vfloat64m4_t test_vfsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsub_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_tum( @@ -535,7 +535,7 @@ vfloat64m4_t test_vfsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsub_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_tum( @@ -544,7 +544,7 @@ vfloat64m8_t test_vfsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsub_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_tumu( @@ -553,7 +553,7 @@ vfloat64m8_t test_vfsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsub_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfsub_vf_f16mf4_tumu( @@ -562,7 +562,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_tumu( @@ -571,7 +571,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsub_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_tumu( @@ -580,7 +580,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_tumu( @@ -589,7 +589,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsub_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_tumu( @@ -598,7 +598,7 @@ vfloat16m1_t test_vfsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t 
test_vfsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_tumu( @@ -607,7 +607,7 @@ vfloat16m1_t test_vfsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsub_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_tumu( @@ -616,7 +616,7 @@ vfloat16m2_t test_vfsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_tumu( @@ -625,7 +625,7 @@ vfloat16m2_t test_vfsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsub_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_tumu( @@ -634,7 +634,7 @@ vfloat16m4_t test_vfsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m4_tumu(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_tumu( @@ -643,7 +643,7 @@ vfloat16m4_t test_vfsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsub_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_tumu( @@ -652,7 +652,7 @@ vfloat16m8_t test_vfsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat16m8_t test_vfsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsub_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsub_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsub_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_tumu( @@ -688,7 +688,7 @@ vfloat32m1_t test_vfsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsub_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_tumu( @@ -697,7 +697,7 @@ vfloat32m1_t test_vfsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsub_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_tumu( @@ -706,7 +706,7 @@ vfloat32m2_t test_vfsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsub_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_tumu( @@ -715,7 +715,7 @@ vfloat32m2_t test_vfsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsub_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m4_tumu(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_tumu( @@ -724,7 +724,7 @@ vfloat32m4_t test_vfsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsub_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_tumu( @@ -733,7 +733,7 @@ vfloat32m4_t test_vfsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsub_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_tumu( @@ -742,7 +742,7 @@ vfloat32m8_t test_vfsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsub_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_tumu( @@ -751,7 +751,7 @@ vfloat32m8_t test_vfsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsub_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_tumu( @@ -760,7 +760,7 @@ vfloat64m1_t test_vfsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1_tumu(vbool64_t 
mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsub_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_tumu( @@ -769,7 +769,7 @@ vfloat64m1_t test_vfsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsub_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_tumu( @@ -778,7 +778,7 @@ vfloat64m2_t test_vfsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsub_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_tumu( @@ -787,7 +787,7 @@ vfloat64m2_t test_vfsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsub_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_tumu( @@ -796,7 +796,7 @@ vfloat64m4_t test_vfsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsub_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfsub_vv_f64m8_tumu( @@ -805,7 +805,7 @@ vfloat64m4_t test_vfsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsub_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_tumu( @@ -814,7 +814,7 @@ vfloat64m8_t test_vfsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsub_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_mu( @@ -823,7 +823,7 @@ vfloat64m8_t test_vfsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsub_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_mu( @@ -832,7 +832,7 @@ vfloat16mf4_t test_vfsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_mu( @@ -841,7 +841,7 @@ vfloat16mf4_t test_vfsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t 
maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsub_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_mu( @@ -850,7 +850,7 @@ vfloat16mf2_t test_vfsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_mu( @@ -859,7 +859,7 @@ vfloat16mf2_t test_vfsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsub_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_mu( @@ -868,7 +868,7 @@ vfloat16m1_t test_vfsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_mu( @@ -877,7 +877,7 @@ vfloat16m1_t test_vfsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsub_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_mu( @@ -886,7 +886,7 @@ 
vfloat16m2_t test_vfsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_mu( @@ -895,7 +895,7 @@ vfloat16m2_t test_vfsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsub_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_mu( @@ -904,7 +904,7 @@ vfloat16m4_t test_vfsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsub_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_mu( @@ -913,7 +913,7 @@ vfloat16m4_t test_vfsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsub_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_mu( @@ -922,7 +922,7 @@ vfloat16m8_t test_vfsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return 
vfsub_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_mu( @@ -931,7 +931,7 @@ vfloat16m8_t test_vfsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsub_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_mu( @@ -940,7 +940,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsub_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_mu( @@ -949,7 +949,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsub_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_mu( @@ -958,7 +958,7 @@ vfloat32m1_t test_vfsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsub_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_mu( @@ -967,7 +967,7 @@ vfloat32m1_t test_vfsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t 
maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsub_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_mu( @@ -976,7 +976,7 @@ vfloat32m2_t test_vfsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsub_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_mu( @@ -985,7 +985,7 @@ vfloat32m2_t test_vfsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsub_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_mu( @@ -994,7 +994,7 @@ vfloat32m4_t test_vfsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsub_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_mu( @@ -1003,7 +1003,7 @@ vfloat32m4_t test_vfsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsub_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfsub_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_mu( @@ -1012,7 +1012,7 @@ vfloat32m8_t test_vfsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsub_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_mu( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsub_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_mu( @@ -1030,7 +1030,7 @@ vfloat64m1_t test_vfsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsub_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_mu( @@ -1039,7 +1039,7 @@ vfloat64m1_t test_vfsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsub_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_mu( @@ -1048,7 +1048,7 @@ vfloat64m2_t test_vfsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsub_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_mu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vfsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsub_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_mu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vfsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsub_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_mu( @@ -1075,7 +1075,7 @@ vfloat64m4_t test_vfsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsub_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_mu( @@ -1084,6 +1084,6 @@ vfloat64m8_t test_vfsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsub_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfsub_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwadd.c index c7f57726212a..fef538c1bf73 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwadd.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_vv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_tu( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_tu( @@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_wv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_tu( @@ -40,7 +40,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_tu( @@ -49,7 +49,7 @@ 
vfloat32mf2_t test_vfwadd_wf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_vv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_tu( @@ -58,7 +58,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_tu( @@ -67,7 +67,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_wv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_vv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwadd_vf_f32m2_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_tu( @@ -103,7 +103,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_wv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_tu( @@ -112,7 +112,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_tu( @@ -121,7 +121,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_vv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_tu( @@ -130,7 +130,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m4_tu(maskedoff, op1, op2, vl); + return 
__riscv_vfwadd_vf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_tu( @@ -139,7 +139,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_wv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_tu( @@ -148,7 +148,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_tu( @@ -157,7 +157,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_vv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_tu( @@ -166,7 +166,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_tu( @@ -175,7 +175,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return 
vfwadd_wv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_tu( @@ -184,7 +184,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tu( @@ -193,7 +193,7 @@ vfloat32m8_t test_vfwadd_wf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_vv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tu( @@ -202,7 +202,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_wv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t maskedoff, 
vfloat64m1_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_vv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_wv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_tu( @@ -256,7 +256,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_tu( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t 
test_vfwadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_vv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_tu( @@ -274,7 +274,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_tu( @@ -283,7 +283,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_wv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_tu( @@ -292,7 +292,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_tu( @@ -301,7 +301,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_vv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_tu( @@ -310,7 +310,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, v // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_tu( @@ -319,7 +319,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_wv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_tu( @@ -328,7 +328,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_tum( @@ -337,7 +337,7 @@ vfloat64m8_t test_vfwadd_wf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_tum( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwadd_wv_f32mf2_tum( @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_wv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_tum( @@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_tum( @@ -373,7 +373,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_tum( @@ -382,7 +382,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_tum( @@ -391,7 +391,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wv_f32m1_tum(vbool32_t 
mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_wv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_tum( @@ -400,7 +400,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_tum( @@ -409,7 +409,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_tum( @@ -418,7 +418,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_wv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_wv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_tum( @@ -472,7 +472,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wf_f32m4_tum(vbool8_t mask, 
vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_tum( @@ -481,7 +481,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_tum( @@ -490,7 +490,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_tum( @@ -499,7 +499,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_wv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_tum( @@ -508,7 +508,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwadd_vv_f64m1_tum( @@ -517,7 +517,7 @@ vfloat32m8_t test_vfwadd_wf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tum( @@ -526,7 +526,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tum( @@ -535,7 +535,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_wv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tum( @@ -544,7 +544,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_tum( @@ -553,7 +553,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t 
maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_tum( @@ -562,7 +562,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_tum( @@ -571,7 +571,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_wv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_tum( @@ -580,7 +580,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_tum( @@ -589,7 +589,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwadd_vf_f64m4_tum( @@ -598,7 +598,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_tum( @@ -607,7 +607,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_wv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_tum( @@ -616,7 +616,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_tum( @@ -625,7 +625,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_tum( @@ -634,7 +634,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t 
maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_tum( @@ -643,7 +643,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_wv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_tum( @@ -652,7 +652,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat64m8_t test_vfwadd_wf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwadd_wv_f32mf2_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_wv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_tumu( @@ -688,7 +688,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_tumu( @@ -697,7 +697,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_tumu( @@ -706,7 +706,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_tumu( @@ -715,7 +715,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfwadd_wv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_wv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_tumu( @@ -724,7 +724,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_tumu( @@ -733,7 +733,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_tumu( @@ -742,7 +742,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_tumu( @@ -751,7 +751,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_wv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vfwadd_wv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_tumu( @@ -760,7 +760,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_tumu( @@ -769,7 +769,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_tumu( @@ -778,7 +778,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_tumu( @@ -787,7 +787,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_wv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_tumu( @@ -796,7 +796,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_tumu( @@ -805,7 +805,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_tumu( @@ -814,7 +814,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_tumu( @@ -823,7 +823,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_wv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_tumu( @@ -832,7 +832,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vfwadd_wf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_tumu( @@ -841,7 +841,7 @@ vfloat32m8_t test_vfwadd_wf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_tumu( @@ -850,7 +850,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_tumu( @@ -859,7 +859,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_wv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_tumu( @@ -868,7 +868,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_tumu( @@ -877,7 +877,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, 
v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_tumu( @@ -886,7 +886,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_tumu( @@ -895,7 +895,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_wv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_tumu( @@ -904,7 +904,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_tumu( @@ -913,7 +913,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_vv_f64m4_tumu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vfwadd_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_tumu( @@ -922,7 +922,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_tumu( @@ -931,7 +931,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_wv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_tumu( @@ -940,7 +940,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_tumu( @@ -949,7 +949,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_tumu( @@ -958,7 +958,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t 
maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_tumu( @@ -967,7 +967,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_wv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_tumu( @@ -976,7 +976,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_mu( @@ -985,7 +985,7 @@ vfloat64m8_t test_vfwadd_wf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_mu( @@ -994,7 +994,7 @@ vfloat32mf2_t test_vfwadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32mf2_mu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vfwadd_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_mu( @@ -1003,7 +1003,7 @@ vfloat32mf2_t test_vfwadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_wv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_mu( @@ -1012,7 +1012,7 @@ vfloat32mf2_t test_vfwadd_wv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwadd_wf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_mu( @@ -1021,7 +1021,7 @@ vfloat32mf2_t test_vfwadd_wf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_mu( @@ -1030,7 +1030,7 @@ vfloat32m1_t test_vfwadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_mu( @@ -1039,7 +1039,7 @@ vfloat32m1_t test_vfwadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t 
maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_wv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_mu( @@ -1048,7 +1048,7 @@ vfloat32m1_t test_vfwadd_wv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwadd_wf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_mu( @@ -1057,7 +1057,7 @@ vfloat32m1_t test_vfwadd_wf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_mu( @@ -1066,7 +1066,7 @@ vfloat32m2_t test_vfwadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_mu( @@ -1075,7 +1075,7 @@ vfloat32m2_t test_vfwadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_wv_f32m2_mu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vfwadd_wv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_mu( @@ -1084,7 +1084,7 @@ vfloat32m2_t test_vfwadd_wv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwadd_wf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_mu( @@ -1093,7 +1093,7 @@ vfloat32m2_t test_vfwadd_wf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_mu( @@ -1102,7 +1102,7 @@ vfloat32m4_t test_vfwadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_mu( @@ -1111,7 +1111,7 @@ vfloat32m4_t test_vfwadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwadd_wv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_wv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_mu( @@ -1120,7 +1120,7 @@ vfloat32m4_t test_vfwadd_wv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m4_t test_vfwadd_wf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_mu( @@ -1129,7 +1129,7 @@ vfloat32m4_t test_vfwadd_wf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_mu( @@ -1138,7 +1138,7 @@ vfloat32m8_t test_vfwadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_mu( @@ -1147,7 +1147,7 @@ vfloat32m8_t test_vfwadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_wv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_mu( @@ -1156,7 +1156,7 @@ vfloat32m8_t test_vfwadd_wv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwadd_wf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f32m8_mu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_mu( @@ -1165,7 +1165,7 @@ vfloat32m8_t test_vfwadd_wf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_mu( @@ -1174,7 +1174,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_mu( @@ -1183,7 +1183,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwadd_wv_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_mu( @@ -1192,7 +1192,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_mu( @@ -1201,7 +1201,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfwadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_mu( @@ -1210,7 +1210,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_mu( @@ -1219,7 +1219,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_wv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_mu( @@ -1228,7 +1228,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_mu( @@ -1237,7 +1237,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_mu( @@ -1246,7 +1246,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_mu( @@ -1255,7 +1255,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_wv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_mu( @@ -1264,7 +1264,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_mu( @@ -1273,7 +1273,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_mu( @@ -1282,7 +1282,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8_mu(vbool8_t mask, 
vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_mu( @@ -1291,7 +1291,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_wv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_mu( @@ -1300,6 +1300,6 @@ vfloat64m8_t test_vfwadd_wv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwadd_wf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt.c index 3cf427e09368..109b70a2de25 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tu(vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tu(vfloat16mf4_t maskedoff, vint8mf8_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tu(vfloat16mf2_t maskedoff, 
vint8mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tu(vfloat16mf2_t maskedoff, vint8mf4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tu(vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f16m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tu(vfloat16m1_t maskedoff, vint8mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tu(vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vfwcvt_f_x_v_f16m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tu(vfloat16m2_t maskedoff, vint8m1_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tu(vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { - return vfwcvt_f_x_v_f16m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tu(vfloat16m4_t maskedoff, vint8m2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tu(vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { - return vfwcvt_f_x_v_f16m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tu(vfloat16m8_t maskedoff, vint8m4_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - 
return vfwcvt_f_xu_v_f16mf4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_tu( @@ -76,7 +76,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t maskedoff, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vfwcvt_f_xu_v_f16mf2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_tu( @@ -85,7 +85,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t maskedoff, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tu(vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_tu( @@ -94,7 +94,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tu(vfloat16m1_t maskedoff, vuint8mf2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tu(vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_tu( @@ -103,7 +103,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tu(vfloat16m2_t maskedoff, vuint8m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tu(vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tu(vfloat16m4_t maskedoff, vuint8m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tu(vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return 
vfwcvt_f_xu_v_f16m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tu(vfloat16m8_t maskedoff, vuint8m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_x_f_v_i32mf2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tu( @@ -130,7 +130,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32mf2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_tu( @@ -139,7 +139,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tu( @@ -148,7 +148,7 @@ vint32m1_t test_vfwcvt_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_tu( @@ -157,7 +157,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) 
{ - return vfwcvt_x_f_v_i32m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tu( @@ -166,7 +166,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_tu( @@ -175,7 +175,7 @@ vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tu( @@ -184,7 +184,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_tu( @@ -193,7 +193,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_x_f_v_i32m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tu( @@ -202,7 +202,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) 
{ - return vfwcvt_rtz_x_f_v_i32m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_tu( @@ -211,7 +211,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32mf2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tu( @@ -220,7 +220,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32mf2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_tu( @@ -229,7 +229,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tu( @@ -238,7 +238,7 @@ vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_tu( @@ -247,7 +247,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vfwcvt_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tu( @@ -256,7 +256,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_tu( @@ -265,7 +265,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tu( @@ -274,7 +274,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_tu( @@ -283,7 +283,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tu( @@ -292,7 +292,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_tu( @@ -301,7 +301,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tu(vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f32mf2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_tu( @@ -310,7 +310,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tu(vfloat32mf2_t maskedoff, vint16mf4_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tu(vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_tu( @@ -319,7 +319,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tu(vfloat32m1_t maskedoff, vint16mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tu(vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vfwcvt_f_x_v_f32m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_tu( @@ -328,7 +328,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tu(vfloat32m2_t maskedoff, vint16m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tu(vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_tu( @@ -337,7 +337,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tu(vfloat32m4_t maskedoff, vint16m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tu(vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vfwcvt_f_x_v_f32m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_tu( @@ -346,7 +346,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tu(vfloat32m8_t maskedoff, vint16m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32mf2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_tu( @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t maskedoff, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tu(vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_tu( @@ -364,7 +364,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tu(vfloat32m1_t maskedoff, vuint16mf2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tu(vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_tu( @@ -373,7 +373,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tu(vfloat32m2_t maskedoff, vuint16m1_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tu(vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_tu( @@ -382,7 +382,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tu(vfloat32m4_t maskedoff, vuint16m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tu(vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_tu( @@ -391,7 +391,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tu(vfloat32m8_t maskedoff, vuint16m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_f_f_v_f32mf2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_tu( @@ -400,7 +400,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_tu( @@ -409,7 +409,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_f_f_v_f32m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_tu( @@ -418,7 +418,7 @@ vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_tu( @@ -427,7 +427,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_f_f_v_f32m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tu( @@ -436,7 +436,7 @@ vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tu( @@ -445,7 +445,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_tu( @@ -454,7 +454,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_x_f_v_i64m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tu( @@ -463,7 +463,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_tu( @@ -472,7 +472,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m4_t test_vfwcvt_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tu( @@ -481,7 +481,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_tu( @@ -490,7 +490,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_x_f_v_i64m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tu( @@ -499,7 +499,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tu( @@ -508,7 +508,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tu( @@ -517,7 +517,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_tu( @@ -526,7 +526,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tu( @@ -535,7 +535,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_tu( @@ -544,7 +544,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tu( @@ -553,7 +553,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_tu( @@ -562,7 +562,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, 
vfloat32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tu( @@ -571,7 +571,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tu( @@ -580,7 +580,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tu(vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_tu( @@ -589,7 +589,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tu(vfloat64m1_t maskedoff, vint32mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tu(vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { - return vfwcvt_f_x_v_f64m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_tu( @@ -598,7 +598,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tu(vfloat64m2_t maskedoff, vint32m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tu(vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_tu( @@ -607,7 +607,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tu(vfloat64m4_t maskedoff, vint32m2_t 
src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tu(vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vfwcvt_f_x_v_f64m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tu( @@ -616,7 +616,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tu(vfloat64m8_t maskedoff, vint32m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tu(vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_tu( @@ -625,7 +625,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tu(vfloat64m1_t maskedoff, vuint32mf2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tu(vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_tu( @@ -634,7 +634,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tu(vfloat64m2_t maskedoff, vuint32m1_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tu(vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_tu( @@ -643,7 +643,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tu(vfloat64m4_t maskedoff, vuint32m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tu(vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tu( @@ -652,7 +652,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tu(vfloat64m8_t maskedoff, vuint32m4_t src // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m1_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_tu( @@ -661,7 +661,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_f_f_v_f64m2_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_tu( @@ -670,7 +670,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m4_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_tu( @@ -679,7 +679,7 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_f_f_v_f64m8_tu(maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_tum( @@ -688,7 +688,7 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_tum( @@ -697,7 +697,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tum(vbool64_t mask, 
vfloat16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_tum( @@ -706,7 +706,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_tum( @@ -715,7 +715,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vfwcvt_f_x_v_f16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_tum( @@ -724,7 +724,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { - return vfwcvt_f_x_v_f16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_tum( @@ -733,7 +733,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { - return vfwcvt_f_x_v_f16m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m8_tum(mask, maskedoff, src, vl); } // 
CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_tum( @@ -742,7 +742,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return vfwcvt_f_xu_v_f16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_tum( @@ -751,7 +751,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vfwcvt_f_xu_v_f16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_tum( @@ -760,7 +760,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_tum( @@ -769,7 +769,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_tum( @@ -778,7 +778,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, 
vuint8m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_tum( @@ -787,7 +787,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_tum( @@ -796,7 +796,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tum( @@ -805,7 +805,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_tum( @@ -814,7 +814,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tum( @@ -823,7 +823,7 @@ vint32m1_t 
test_vfwcvt_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_tum( @@ -832,7 +832,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_x_f_v_i32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tum( @@ -841,7 +841,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_tum( @@ -850,7 +850,7 @@ vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tum( @@ -859,7 +859,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m4_tum(mask, maskedoff, 
src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_tum( @@ -868,7 +868,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_x_f_v_i32m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tum( @@ -877,7 +877,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_tum( @@ -886,7 +886,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tum( @@ -895,7 +895,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_tum( @@ -904,7 +904,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t mask // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tum( @@ -913,7 +913,7 @@ vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_tum( @@ -922,7 +922,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tum( @@ -931,7 +931,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_tum( @@ -940,7 +940,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); + return 
__riscv_vfwcvt_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tum( @@ -949,7 +949,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_tum( @@ -958,7 +958,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tum( @@ -967,7 +967,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_tum( @@ -976,7 +976,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_tum( @@ -985,7 +985,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_tum( @@ -994,7 +994,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vfwcvt_f_x_v_f32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_tum( @@ -1003,7 +1003,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_tum( @@ -1012,7 +1012,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vfwcvt_f_x_v_f32m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_tum( @@ -1021,7 +1021,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfwcvt_f_xu_v_f32m1_tum( @@ -1030,7 +1030,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_tum( @@ -1039,7 +1039,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_tum( @@ -1048,7 +1048,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_tum( @@ -1057,7 +1057,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_tum( @@ -1066,7 +1066,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t 
src, size_t vl) { - return vfwcvt_f_f_v_f32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_tum( @@ -1075,7 +1075,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_tum( @@ -1084,7 +1084,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_f_f_v_f32m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_tum( @@ -1093,7 +1093,7 @@ vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_tum( @@ -1102,7 +1102,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_f_f_v_f32m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tum( @@ -1111,7 +1111,7 @@ vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tum(vbool4_t mask, 
vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tum( @@ -1120,7 +1120,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_tum( @@ -1129,7 +1129,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_x_f_v_i64m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tum( @@ -1138,7 +1138,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_tum( @@ -1147,7 +1147,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m4_tum(mask, maskedoff, src, vl); + return 
__riscv_vfwcvt_x_f_v_i64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tum( @@ -1156,7 +1156,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_tum( @@ -1165,7 +1165,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_x_f_v_i64m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tum( @@ -1174,7 +1174,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tum( @@ -1183,7 +1183,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tum( @@ -1192,7 +1192,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_tum( @@ -1201,7 +1201,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tum( @@ -1210,7 +1210,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_tum( @@ -1219,7 +1219,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tum( @@ -1228,7 +1228,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tum(mask, maskedoff, 
src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_tum( @@ -1237,7 +1237,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tum( @@ -1246,7 +1246,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tum( @@ -1255,7 +1255,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_tum( @@ -1264,7 +1264,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { - return vfwcvt_f_x_v_f64m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_tum( @@ -1273,7 +1273,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tum(vbool16_t mask, 
vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_tum( @@ -1282,7 +1282,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vfwcvt_f_x_v_f64m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tum( @@ -1291,7 +1291,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_tum( @@ -1300,7 +1300,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_tum( @@ -1309,7 +1309,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_tum( @@ -1318,7 +1318,7 @@ vfloat64m4_t 
test_vfwcvt_f_xu_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m8_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tum( @@ -1327,7 +1327,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m1_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_tum( @@ -1336,7 +1336,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_f_f_v_f64m2_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_tum( @@ -1345,7 +1345,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m4_tum(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_tum( @@ -1354,7 +1354,7 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_f_f_v_f64m8_tum(mask, maskedoff, src, vl); + 
return __riscv_vfwcvt_f_f_v_f64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_tumu( @@ -1363,7 +1363,7 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_tumu( @@ -1372,7 +1372,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_tumu( @@ -1381,7 +1381,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_tumu( @@ -1390,7 +1390,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vfwcvt_f_x_v_f16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_tumu( @@ -1399,7 +1399,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { - return vfwcvt_f_x_v_f16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_tumu( @@ -1408,7 +1408,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { - return vfwcvt_f_x_v_f16m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_tumu( @@ -1417,7 +1417,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return vfwcvt_f_xu_v_f16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_tumu( @@ -1426,7 +1426,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vfwcvt_f_xu_v_f16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_tumu( @@ -1435,7 +1435,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m1_tumu(mask, maskedoff, src, vl); } // 
CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_tumu( @@ -1444,7 +1444,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_tumu( @@ -1453,7 +1453,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_tumu( @@ -1462,7 +1462,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_tumu( @@ -1471,7 +1471,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tumu( @@ -1480,7 +1480,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, 
vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_tumu( @@ -1489,7 +1489,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tumu( @@ -1498,7 +1498,7 @@ vint32m1_t test_vfwcvt_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_tumu( @@ -1507,7 +1507,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tumu( @@ -1516,7 +1516,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfwcvt_x_f_v_i32m4_tumu( @@ -1525,7 +1525,7 @@ vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tumu( @@ -1534,7 +1534,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_tumu( @@ -1543,7 +1543,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tumu( @@ -1552,7 +1552,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_tumu( @@ -1561,7 +1561,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t 
maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tumu( @@ -1570,7 +1570,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_tumu( @@ -1579,7 +1579,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tumu( @@ -1588,7 +1588,7 @@ vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_tumu( @@ -1597,7 +1597,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfwcvt_rtz_xu_f_v_u32m2_tumu( @@ -1606,7 +1606,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_tumu( @@ -1615,7 +1615,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tumu( @@ -1624,7 +1624,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_tumu( @@ -1633,7 +1633,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tumu( @@ -1642,7 +1642,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t 
test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_tumu( @@ -1651,7 +1651,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_tumu( @@ -1660,7 +1660,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_tumu( @@ -1669,7 +1669,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vfwcvt_f_x_v_f32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_tumu( @@ -1678,7 +1678,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfwcvt_f_x_v_f32m8_tumu( @@ -1687,7 +1687,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vfwcvt_f_x_v_f32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_tumu( @@ -1696,7 +1696,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_tumu( @@ -1705,7 +1705,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_tumu( @@ -1714,7 +1714,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_tumu( @@ -1723,7 +1723,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t 
maskedoff, vuint16m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_tumu( @@ -1732,7 +1732,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_tumu( @@ -1741,7 +1741,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_f_f_v_f32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_tumu( @@ -1750,7 +1750,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_tumu( @@ -1759,7 +1759,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_f_f_v_f32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_tumu( @@ -1768,7 +1768,7 @@ 
vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_tumu( @@ -1777,7 +1777,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_f_f_v_f32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_tumu( @@ -1786,7 +1786,7 @@ vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tumu( @@ -1795,7 +1795,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_tumu( @@ -1804,7 +1804,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return 
vfwcvt_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tumu( @@ -1813,7 +1813,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_tumu( @@ -1822,7 +1822,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tumu( @@ -1831,7 +1831,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_tumu( @@ -1840,7 +1840,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tumu( @@ -1849,7 +1849,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8_tumu(vbool8_t 
mask, vint64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tumu( @@ -1858,7 +1858,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tumu( @@ -1867,7 +1867,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_tumu( @@ -1876,7 +1876,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tumu( @@ -1885,7 +1885,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m2_tumu(mask, 
maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_tumu( @@ -1894,7 +1894,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tumu( @@ -1903,7 +1903,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_tumu( @@ -1912,7 +1912,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tumu( @@ -1921,7 +1921,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tumu( @@ -1930,7 +1930,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t 
mask, vuint64m8_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_tumu( @@ -1939,7 +1939,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { - return vfwcvt_f_x_v_f64m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_tumu( @@ -1948,7 +1948,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_tumu( @@ -1957,7 +1957,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vfwcvt_f_x_v_f64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_tumu( @@ -1966,7 +1966,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m1_tumu(mask, maskedoff, src, vl); + return 
__riscv_vfwcvt_f_xu_v_f64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_tumu( @@ -1975,7 +1975,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_tumu( @@ -1984,7 +1984,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_tumu( @@ -1993,7 +1993,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_tumu( @@ -2002,7 +2002,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_tumu( @@ -2011,7 +2011,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_f_f_v_f64m2_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_tumu( @@ -2020,7 +2020,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_tumu( @@ -2029,7 +2029,7 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_f_f_v_f64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_mu( @@ -2038,7 +2038,7 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_mu( @@ -2047,7 +2047,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vfwcvt_f_x_v_f16m1_mu( @@ -2056,7 +2056,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_x_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_mu( @@ -2065,7 +2065,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_x_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vfwcvt_f_x_v_f16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_mu( @@ -2074,7 +2074,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfwcvt_f_x_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { - return vfwcvt_f_x_v_f16m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_mu( @@ -2083,7 +2083,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfwcvt_f_x_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { - return vfwcvt_f_x_v_f16m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f16m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_mu( @@ -2092,7 +2092,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return 
vfwcvt_f_xu_v_f16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_mu( @@ -2101,7 +2101,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vfwcvt_f_xu_v_f16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_mu( @@ -2110,7 +2110,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_mu( @@ -2119,7 +2119,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_mu( @@ -2128,7 +2128,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_mu( @@ -2137,7 +2137,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f16m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f16m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_mu( @@ -2146,7 +2146,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_mu( @@ -2155,7 +2155,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_mu( @@ -2164,7 +2164,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_mu( @@ -2173,7 +2173,7 @@ vint32m1_t test_vfwcvt_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m1_mu(mask, maskedoff, src, 
vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_mu( @@ -2182,7 +2182,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_x_f_v_i32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_mu( @@ -2191,7 +2191,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_mu( @@ -2200,7 +2200,7 @@ vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_x_f_v_i32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_mu( @@ -2209,7 +2209,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_mu( @@ -2218,7 +2218,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t 
src, size_t vl) { - return vfwcvt_x_f_v_i32m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_mu( @@ -2227,7 +2227,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i32m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_mu( @@ -2236,7 +2236,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_mu( @@ -2245,7 +2245,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_mu( @@ -2254,7 +2254,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_mu( @@ -2263,7 +2263,7 @@ vuint32m1_t 
test_vfwcvt_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_mu( @@ -2272,7 +2272,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfwcvt_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_mu( @@ -2281,7 +2281,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_mu( @@ -2290,7 +2290,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_mu( @@ -2299,7 +2299,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return 
vfwcvt_rtz_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_mu( @@ -2308,7 +2308,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfwcvt_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_mu( @@ -2317,7 +2317,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_mu( @@ -2326,7 +2326,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return vfwcvt_f_x_v_f32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_mu( @@ -2335,7 +2335,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_x_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_mu( @@ -2344,7 +2344,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1_mu(vbool32_t mask, vfloat32m1_t 
maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_x_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vfwcvt_f_x_v_f32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_mu( @@ -2353,7 +2353,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_x_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vfwcvt_f_x_v_f32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_mu( @@ -2362,7 +2362,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_x_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vfwcvt_f_x_v_f32m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_mu( @@ -2371,7 +2371,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_mu( @@ -2380,7 +2380,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m1_mu(mask, maskedoff, src, vl); } // 
CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_mu( @@ -2389,7 +2389,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_mu( @@ -2398,7 +2398,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_mu( @@ -2407,7 +2407,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f32m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_mu( @@ -2416,7 +2416,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_f_f_v_f32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_mu( @@ -2425,7 +2425,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_f_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t 
src, size_t vl) { - return vfwcvt_f_f_v_f32m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_mu( @@ -2434,7 +2434,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_f_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_f_f_v_f32m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_mu( @@ -2443,7 +2443,7 @@ vfloat32m2_t test_vfwcvt_f_f_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_f_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_mu( @@ -2452,7 +2452,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_f_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_f_f_v_f32m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_mu( @@ -2461,7 +2461,7 @@ vfloat32m8_t test_vfwcvt_f_f_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_mu( @@ -2470,7 +2470,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vflo 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_mu( @@ -2479,7 +2479,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_x_f_v_i64m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_mu( @@ -2488,7 +2488,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_mu( @@ -2497,7 +2497,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_x_f_v_i64m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_mu( @@ -2506,7 +2506,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m4_mu(mask, maskedoff, src, vl); } 
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_mu( @@ -2515,7 +2515,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_x_f_v_i64m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_x_f_v_i64m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_mu( @@ -2524,7 +2524,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_x_f_v_i64m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_x_f_v_i64m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_mu( @@ -2533,7 +2533,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_mu( @@ -2542,7 +2542,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_mu( @@ -2551,7 +2551,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, 
vfloat32m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_mu( @@ -2560,7 +2560,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_mu( @@ -2569,7 +2569,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_mu( @@ -2578,7 +2578,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_mu( @@ -2587,7 +2587,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_mu( @@ -2596,7 +2596,7 @@ vuint64m8_t 
test_vfwcvt_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_rtz_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_mu( @@ -2605,7 +2605,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_x_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_mu( @@ -2614,7 +2614,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_x_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { - return vfwcvt_f_x_v_f64m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_mu( @@ -2623,7 +2623,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_x_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_x_v_f64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_mu( @@ -2632,7 +2632,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_x_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vfwcvt_f_x_v_f64m8_mu(mask, maskedoff, src, vl); + return 
__riscv_vfwcvt_f_x_v_f64m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_mu( @@ -2641,7 +2641,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_mu( @@ -2650,7 +2650,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_mu( @@ -2659,7 +2659,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_mu( @@ -2668,7 +2668,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_xu_v_f64m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_mu( @@ -2677,7 +2677,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vfwcvt_f_f_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m1_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_mu( @@ -2686,7 +2686,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_f_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_f_f_v_f64m2_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_mu( @@ -2695,7 +2695,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_f_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m4_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_mu( @@ -2704,6 +2704,6 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_f_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_f_f_v_f64m8_mu(mask, maskedoff, src, vl); + return __riscv_vfwcvt_f_f_v_f64m8_mu(mask, maskedoff, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc.c index 48f5206aff25..b0363ab30cc2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmacc.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfwmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_tu( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmacc_vf_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_tu( @@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmacc_vv_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_tu( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmacc_vf_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_tu( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmacc_vv_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_tu( @@ -58,7 +58,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t 
test_vfwmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmacc_vf_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_tu( @@ -67,7 +67,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmacc_vv_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_tu( @@ -76,7 +76,7 @@ vfloat32m4_t test_vfwmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmacc_vf_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_tu( @@ -85,7 +85,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmacc_vv_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_tu( @@ -94,7 +94,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmacc_vf_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tu( @@ -103,7 +103,7 @@ vfloat32m8_t test_vfwmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_tu(vfloat64m1_t vd, 
vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmacc_vv_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tu( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmacc_vf_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmacc_vv_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmacc_vf_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_tu( @@ -139,7 +139,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmacc_vv_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_tu( @@ -148,7 +148,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t 
vl) { - return vfwmacc_vf_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_tu( @@ -157,7 +157,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmacc_vv_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_tu( @@ -166,7 +166,7 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmacc_vf_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_tum( @@ -175,7 +175,7 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmacc_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_tum( @@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmacc_vf_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_tum( @@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1_tum(vbool32_t 
mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmacc_vv_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_tum( @@ -202,7 +202,7 @@ vfloat32m1_t test_vfwmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmacc_vf_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_tum( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmacc_vv_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmacc_vf_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_tum( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmacc_vv_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_tum( @@ -238,7 +238,7 @@ vfloat32m4_t 
test_vfwmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmacc_vf_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_tum( @@ -247,7 +247,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_tum( @@ -256,7 +256,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmacc_vf_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tum( @@ -265,7 +265,7 @@ vfloat32m8_t test_vfwmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmacc_vv_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tum( @@ -274,7 +274,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmacc_vf_f64m1_tum(mask, vd, vs1, vs2, vl); + return 
__riscv_vfwmacc_vf_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_tum( @@ -283,7 +283,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmacc_vv_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_tum( @@ -292,7 +292,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmacc_vf_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_tum( @@ -301,7 +301,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmacc_vv_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_tum( @@ -310,7 +310,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmacc_vf_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_tum( @@ -319,7 +319,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_tum(vbool8_t mask, 
vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmacc_vv_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_tum( @@ -328,7 +328,7 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmacc_vf_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_tumu( @@ -337,7 +337,7 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmacc_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmacc_vf_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_tumu( @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmacc_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_tumu( @@ -364,7 +364,7 @@ vfloat32m1_t 
test_vfwmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmacc_vf_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_tumu( @@ -373,7 +373,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmacc_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_tumu( @@ -382,7 +382,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmacc_vf_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_tumu( @@ -391,7 +391,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmacc_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_tumu( @@ -400,7 +400,7 @@ vfloat32m4_t test_vfwmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmacc_vf_f32m4_tumu(mask, vd, vs1, vs2, vl); + 
return __riscv_vfwmacc_vf_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_tumu( @@ -409,7 +409,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_tumu( @@ -418,7 +418,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmacc_vf_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_tumu( @@ -427,7 +427,7 @@ vfloat32m8_t test_vfwmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_tumu( @@ -436,7 +436,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmacc_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_tumu( @@ -445,7 +445,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfwmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmacc_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_tumu( @@ -454,7 +454,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmacc_vf_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_tumu( @@ -463,7 +463,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmacc_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_tumu( @@ -472,7 +472,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmacc_vf_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_tumu( @@ -481,7 +481,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_tumu( @@ -490,7 
+490,7 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmacc_vf_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_mu( @@ -499,7 +499,7 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmacc_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_mu( @@ -508,7 +508,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmacc_vf_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_mu( @@ -517,7 +517,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmacc_vv_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_mu( @@ -526,7 +526,7 @@ vfloat32m1_t test_vfwmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmacc_vf_f32m1_mu(mask, vd, vs1, vs2, 
vl); + return __riscv_vfwmacc_vf_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_mu( @@ -535,7 +535,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmacc_vv_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_mu( @@ -544,7 +544,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmacc_vf_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_mu( @@ -553,7 +553,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmacc_vv_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_mu( @@ -562,7 +562,7 @@ vfloat32m4_t test_vfwmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmacc_vf_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_mu( @@ -571,7 +571,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t 
vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_mu( @@ -580,7 +580,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmacc_vf_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_mu( @@ -589,7 +589,7 @@ vfloat32m8_t test_vfwmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16 v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmacc_vv_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_mu( @@ -598,7 +598,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmacc_vf_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_mu( @@ -607,7 +607,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmacc_vv_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_mu( @@ -616,7 +616,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, 
vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmacc_vf_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_mu( @@ -625,7 +625,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmacc_vv_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_mu( @@ -634,7 +634,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmacc_vf_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_mu( @@ -643,7 +643,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_mu( @@ -652,6 +652,6 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmacc_vf_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmacc_vf_f64m8_mu(mask, vd, vs1, vs2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac.c index 53278c6ec2a5..8c473979d9d2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmsac.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmsac_vv_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_tu( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmsac_vf_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_tu( @@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmsac_vv_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_tu( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmsac_vf_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_tu( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmsac_vv_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_tu( @@ -58,7 +58,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmsac_vf_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_tu( @@ -67,7 +67,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmsac_vv_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_tu( @@ -76,7 +76,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmsac_vf_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_tu( @@ -85,7 +85,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmsac_vv_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_tu( @@ -94,7 +94,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m8_t test_vfwmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmsac_vf_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tu( @@ -103,7 +103,7 @@ vfloat32m8_t test_vfwmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmsac_vv_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tu( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmsac_vf_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmsac_vv_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmsac_vf_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_tu( @@ -139,7 +139,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t 
test_vfwmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmsac_vv_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_tu( @@ -148,7 +148,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmsac_vf_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_tu( @@ -157,7 +157,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmsac_vv_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_tu( @@ -166,7 +166,7 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmsac_vf_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_tum( @@ -175,7 +175,7 @@ vfloat64m8_t test_vfwmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmsac_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_tum( @@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32mf2_t test_vfwmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmsac_vf_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_tum( @@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmsac_vv_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_tum( @@ -202,7 +202,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmsac_vf_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_tum( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmsac_vv_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmsac_vf_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_tum( @@ 
-229,7 +229,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmsac_vv_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_tum( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmsac_vf_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_tum( @@ -247,7 +247,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmsac_vv_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_tum( @@ -256,7 +256,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmsac_vf_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tum( @@ -265,7 +265,7 @@ vfloat32m8_t test_vfwmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmsac_vv_f64m1_tum(mask, vd, vs1, 
vs2, vl); + return __riscv_vfwmsac_vv_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tum( @@ -274,7 +274,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmsac_vf_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_tum( @@ -283,7 +283,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmsac_vv_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_tum( @@ -292,7 +292,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmsac_vf_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_tum( @@ -301,7 +301,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmsac_vv_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_tum( @@ -310,7 +310,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t 
test_vfwmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmsac_vf_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_tum( @@ -319,7 +319,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmsac_vv_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_tum( @@ -328,7 +328,7 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmsac_vf_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_tumu( @@ -337,7 +337,7 @@ vfloat64m8_t test_vfwmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmsac_vf_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_tumu( @@ -355,7 
+355,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmsac_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_tumu( @@ -364,7 +364,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmsac_vf_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_tumu( @@ -373,7 +373,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmsac_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_tumu( @@ -382,7 +382,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmsac_vf_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_tumu( @@ -391,7 +391,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return 
vfwmsac_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_tumu( @@ -400,7 +400,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmsac_vf_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_tumu( @@ -409,7 +409,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmsac_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_tumu( @@ -418,7 +418,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmsac_vf_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_tumu( @@ -427,7 +427,7 @@ vfloat32m8_t test_vfwmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_tumu( @@ -436,7 +436,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat3 // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmsac_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_tumu( @@ -445,7 +445,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmsac_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_tumu( @@ -454,7 +454,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmsac_vf_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_tumu( @@ -463,7 +463,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmsac_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_tumu( @@ -472,7 +472,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmsac_vf_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfwmsac_vv_f64m8_tumu( @@ -481,7 +481,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmsac_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_tumu( @@ -490,7 +490,7 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmsac_vf_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_mu( @@ -499,7 +499,7 @@ vfloat64m8_t test_vfwmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmsac_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_mu( @@ -508,7 +508,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwmsac_vf_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_mu( @@ -517,7 +517,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - 
return vfwmsac_vv_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_mu( @@ -526,7 +526,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwmsac_vf_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_mu( @@ -535,7 +535,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmsac_vv_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_mu( @@ -544,7 +544,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwmsac_vf_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_mu( @@ -553,7 +553,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmsac_vv_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_mu( @@ -562,7 +562,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t 
test_vfwmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwmsac_vf_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_mu( @@ -571,7 +571,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmsac_vv_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_mu( @@ -580,7 +580,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwmsac_vf_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_mu( @@ -589,7 +589,7 @@ vfloat32m8_t test_vfwmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16 v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmsac_vv_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_mu( @@ -598,7 +598,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwmsac_vf_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_mu( @@ -607,7 +607,7 @@ vfloat64m1_t 
test_vfwmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmsac_vv_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_mu( @@ -616,7 +616,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwmsac_vf_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_mu( @@ -625,7 +625,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmsac_vv_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_mu( @@ -634,7 +634,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwmsac_vf_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_mu( @@ -643,7 +643,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmsac_vv_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vv_f64m8_mu(mask, 
vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_mu( @@ -652,6 +652,6 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwmsac_vf_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwmsac_vf_f64m8_mu(mask, vd, vs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmul.c index 55592b5152ba..7948036f867c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwmul.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwmul_vv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_tu( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_tu( @@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwmul_vv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwmul_vf_f32m1_tu( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_tu( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwmul_vv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_tu( @@ -58,7 +58,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_tu( @@ -67,7 +67,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwmul_vv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_tu( @@ -76,7 +76,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m4_tu(maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_tu( @@ -85,7 +85,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwmul_vv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_tu( @@ -94,7 +94,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tu( @@ -103,7 +103,7 @@ vfloat32m8_t test_vfwmul_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwmul_vv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tu( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwmul_vv_f64m2_tu(maskedoff, op1, op2, vl); 
+ return __riscv_vfwmul_vv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_tu( @@ -139,7 +139,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwmul_vv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_tu( @@ -148,7 +148,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_tu( @@ -157,7 +157,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwmul_vv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_tu( @@ -166,7 +166,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return 
vfwmul_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_tum( @@ -175,7 +175,7 @@ vfloat64m8_t test_vfwmul_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwmul_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_tum( @@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_tum( @@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwmul_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_tum( @@ -202,7 +202,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_tum( @@ -211,7 +211,7 @@ vfloat32m1_t 
test_vfwmul_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwmul_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_tum( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwmul_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_tum( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_tum( @@ -247,7 +247,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - 
return vfwmul_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_tum( @@ -256,7 +256,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tum( @@ -265,7 +265,7 @@ vfloat32m8_t test_vfwmul_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwmul_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tum( @@ -274,7 +274,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_tum( @@ -283,7 +283,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwmul_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_tum( @@ -292,7 +292,7 @@ vfloat64m2_t 
test_vfwmul_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_tum( @@ -301,7 +301,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwmul_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_tum( @@ -310,7 +310,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_tum( @@ -319,7 +319,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwmul_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_tum( @@ -328,7 +328,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return 
vfwmul_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_tumu( @@ -337,7 +337,7 @@ vfloat64m8_t test_vfwmul_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwmul_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_tumu( @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwmul_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_tumu( @@ -364,7 +364,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_tumu( @@ -373,7 +373,7 @@ 
vfloat32m1_t test_vfwmul_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwmul_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_tumu( @@ -382,7 +382,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_tumu( @@ -391,7 +391,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwmul_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_tumu( @@ -400,7 +400,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_tumu( @@ -409,7 +409,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, 
vfloat16m4_t op2, size_t vl) { - return vfwmul_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_tumu( @@ -418,7 +418,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_tumu( @@ -427,7 +427,7 @@ vfloat32m8_t test_vfwmul_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwmul_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_tumu( @@ -436,7 +436,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_tumu( @@ -445,7 +445,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwmul_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_tumu( @@ 
-454,7 +454,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_tumu( @@ -463,7 +463,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwmul_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_tumu( @@ -472,7 +472,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_tumu( @@ -481,7 +481,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwmul_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_tumu( @@ -490,7 +490,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, 
vfloat32m4_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_mu( @@ -499,7 +499,7 @@ vfloat64m8_t test_vfwmul_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwmul_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_mu( @@ -508,7 +508,7 @@ vfloat32mf2_t test_vfwmul_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwmul_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_mu( @@ -517,7 +517,7 @@ vfloat32mf2_t test_vfwmul_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwmul_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_mu( @@ -526,7 +526,7 @@ vfloat32m1_t test_vfwmul_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwmul_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_mu( @@ 
-535,7 +535,7 @@ vfloat32m1_t test_vfwmul_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwmul_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_mu( @@ -544,7 +544,7 @@ vfloat32m2_t test_vfwmul_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwmul_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_mu( @@ -553,7 +553,7 @@ vfloat32m2_t test_vfwmul_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwmul_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_mu( @@ -562,7 +562,7 @@ vfloat32m4_t test_vfwmul_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwmul_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_mu( @@ -571,7 +571,7 @@ vfloat32m4_t test_vfwmul_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, 
size_t vl) { - return vfwmul_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_mu( @@ -580,7 +580,7 @@ vfloat32m8_t test_vfwmul_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwmul_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_mu( @@ -589,7 +589,7 @@ vfloat32m8_t test_vfwmul_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwmul_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_mu( @@ -598,7 +598,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_mu( @@ -607,7 +607,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwmul_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_mu( @@ -616,7 +616,7 @@ vfloat64m2_t 
test_vfwmul_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_mu( @@ -625,7 +625,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwmul_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_mu( @@ -634,7 +634,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwmul_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_mu( @@ -643,7 +643,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwmul_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_mu( @@ -652,6 +652,6 @@ vfloat64m8_t test_vfwmul_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return 
vfwmul_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwmul_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc.c index f3a88dfc7de6..64267471e4f0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmacc.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_tu( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmacc_vf_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_tu( @@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmacc_vv_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_tu( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmacc_vf_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m1_tu(vd, vs1, vs2, 
vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_tu( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmacc_vv_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_tu( @@ -58,7 +58,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmacc_vf_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_tu( @@ -67,7 +67,7 @@ vfloat32m2_t test_vfwnmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmacc_vv_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_tu( @@ -76,7 +76,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmacc_vf_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_tu( @@ -85,7 +85,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmacc_vv_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m8_tu(vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_tu( @@ -94,7 +94,7 @@ vfloat32m8_t test_vfwnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmacc_vf_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tu( @@ -103,7 +103,7 @@ vfloat32m8_t test_vfwnmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmacc_vv_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tu( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmacc_vf_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmacc_vv_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmacc_vf_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vfwnmacc_vv_f64m4_tu( @@ -139,7 +139,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmacc_vv_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_tu( @@ -148,7 +148,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmacc_vf_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_tu( @@ -157,7 +157,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmacc_vv_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_tu( @@ -166,7 +166,7 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmacc_vf_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_tum( @@ -175,7 +175,7 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmacc_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_tum( @@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmacc_vf_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_tum( @@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmacc_vv_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_tum( @@ -202,7 +202,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmacc_vf_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_tum( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmacc_vv_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, 
vfloat16m1_t vs2, size_t vl) { - return vfwnmacc_vf_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_tum( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfwnmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmacc_vv_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_tum( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmacc_vf_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_tum( @@ -247,7 +247,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_tum( @@ -256,7 +256,7 @@ vfloat32m8_t test_vfwnmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmacc_vf_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tum( @@ -265,7 +265,7 @@ vfloat32m8_t test_vfwnmacc_vf_f32m8_tum(vbool4_t mask, 
vfloat32m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmacc_vv_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tum( @@ -274,7 +274,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmacc_vf_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_tum( @@ -283,7 +283,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmacc_vv_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_tum( @@ -292,7 +292,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmacc_vf_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_tum( @@ -301,7 +301,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmacc_vv_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m4_tum(mask, 
vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_tum( @@ -310,7 +310,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmacc_vf_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_tum( @@ -319,7 +319,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmacc_vv_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_tum( @@ -328,7 +328,7 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmacc_vf_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_tumu( @@ -337,7 +337,7 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmacc_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tumu(vbool64_t mask, 
vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmacc_vf_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_tumu( @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmacc_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_tumu( @@ -364,7 +364,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmacc_vf_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_tumu( @@ -373,7 +373,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmacc_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_tumu( @@ -382,7 +382,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmacc_vf_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_tumu( @@ -391,7 +391,7 @@ 
vfloat32m2_t test_vfwnmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmacc_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_tumu( @@ -400,7 +400,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmacc_vf_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_tumu( @@ -409,7 +409,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_tumu( @@ -418,7 +418,7 @@ vfloat32m8_t test_vfwnmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmacc_vf_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_tumu( @@ -427,7 +427,7 @@ vfloat32m8_t test_vfwnmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return 
vfwnmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_tumu( @@ -436,7 +436,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmacc_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_tumu( @@ -445,7 +445,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmacc_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_tumu( @@ -454,7 +454,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmacc_vf_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_tumu( @@ -463,7 +463,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmacc_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_tumu( @@ -472,7 +472,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmacc_vf_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_tumu( @@ -481,7 +481,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_tumu( @@ -490,7 +490,7 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmacc_vf_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_mu( @@ -499,7 +499,7 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmacc_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_mu( @@ -508,7 +508,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmacc_vf_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32mf2_mu(mask, vd, vs1, vs2, vl); } 
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_mu( @@ -517,7 +517,7 @@ vfloat32mf2_t test_vfwnmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmacc_vv_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_mu( @@ -526,7 +526,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmacc_vf_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_mu( @@ -535,7 +535,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmacc_vv_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_mu( @@ -544,7 +544,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmacc_vf_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_mu( @@ -553,7 +553,7 @@ vfloat32m2_t test_vfwnmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, 
size_t vl) { - return vfwnmacc_vv_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_mu( @@ -562,7 +562,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmacc_vf_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_mu( @@ -571,7 +571,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_mu( @@ -580,7 +580,7 @@ vfloat32m8_t test_vfwnmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmacc_vf_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_mu( @@ -589,7 +589,7 @@ vfloat32m8_t test_vfwnmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmacc_vv_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_mu( @@ -598,7 +598,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32 // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmacc_vf_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_mu( @@ -607,7 +607,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmacc_vv_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_mu( @@ -616,7 +616,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmacc_vf_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_mu( @@ -625,7 +625,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmacc_vv_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_mu( @@ -634,7 +634,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmacc_vf_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_mu( @@ 
-643,7 +643,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_mu( @@ -652,6 +652,6 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmacc_vf_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmacc_vf_f64m8_mu(mask, vd, vs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac.c index 956a5ff38099..3e7a355d8b14 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwnmsac.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmsac_vv_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_tu( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmsac_vf_f32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_tu( @@ -31,7 +31,7 @@ vfloat32mf2_t 
test_vfwnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmsac_vv_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_tu( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmsac_vf_f32m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_tu( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmsac_vv_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_tu( @@ -58,7 +58,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmsac_vf_f32m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_tu( @@ -67,7 +67,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmsac_vv_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_tu( @@ -76,7 +76,7 @@ vfloat32m4_t 
test_vfwnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmsac_vf_f32m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_tu( @@ -85,7 +85,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmsac_vv_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_tu( @@ -94,7 +94,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmsac_vf_f32m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tu( @@ -103,7 +103,7 @@ vfloat32m8_t test_vfwnmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmsac_vv_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tu( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmsac_vf_f64m1_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t 
test_vfwnmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmsac_vv_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmsac_vf_f64m2_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_tu( @@ -139,7 +139,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmsac_vv_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_tu( @@ -148,7 +148,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmsac_vf_f64m4_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_tu( @@ -157,7 +157,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmsac_vv_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_tu( @@ -166,7 +166,7 @@ vfloat64m8_t 
test_vfwnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmsac_vf_f64m8_tu(vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_tum( @@ -175,7 +175,7 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmsac_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_tum( @@ -184,7 +184,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmsac_vf_f32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_tum( @@ -193,7 +193,7 @@ vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, _Flo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmsac_vv_f32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_tum( @@ -202,7 +202,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmsac_vf_f32m1_tum(mask, vd, vs1, vs2, vl); + return 
__riscv_vfwnmsac_vf_f32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_tum( @@ -211,7 +211,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmsac_vv_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmsac_vf_f32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_tum( @@ -229,7 +229,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmsac_vv_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_tum( @@ -238,7 +238,7 @@ vfloat32m4_t test_vfwnmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmsac_vf_f32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_tum( @@ -247,7 +247,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t 
test_vfwnmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmsac_vv_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_tum( @@ -256,7 +256,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmsac_vf_f32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tum( @@ -265,7 +265,7 @@ vfloat32m8_t test_vfwnmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmsac_vv_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tum( @@ -274,7 +274,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmsac_vf_f64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_tum( @@ -283,7 +283,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmsac_vv_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_tum( @@ 
-292,7 +292,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmsac_vf_f64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_tum( @@ -301,7 +301,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmsac_vv_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_tum( @@ -310,7 +310,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmsac_vf_f64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_tum( @@ -319,7 +319,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmsac_vv_f64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_tum( @@ -328,7 +328,7 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmsac_vf_f64m8_tum(mask, vd, 
vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_tumu( @@ -337,7 +337,7 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_tumu( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmsac_vf_f32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_tumu( @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, _Fl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmsac_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_tumu( @@ -364,7 +364,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmsac_vf_f32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_tumu( @@ -373,7 +373,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, _Float // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmsac_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_tumu( @@ -382,7 +382,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmsac_vf_f32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_tumu( @@ -391,7 +391,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, _Float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmsac_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_tumu( @@ -400,7 +400,7 @@ vfloat32m4_t test_vfwnmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmsac_vf_f32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_tumu( @@ -409,7 +409,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmsac_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m8_tumu(mask, vd, vs1, 
vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_tumu( @@ -418,7 +418,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmsac_vf_f32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_tumu( @@ -427,7 +427,7 @@ vfloat32m8_t test_vfwnmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, _Float1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_tumu( @@ -436,7 +436,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmsac_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_tumu( @@ -445,7 +445,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmsac_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_tumu( @@ -454,7 +454,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t 
vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmsac_vf_f64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_tumu( @@ -463,7 +463,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmsac_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_tumu( @@ -472,7 +472,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmsac_vf_f64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_tumu( @@ -481,7 +481,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmsac_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_tumu( @@ -490,7 +490,7 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmsac_vf_f64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_mu( @@ -499,7 +499,7 @@ vfloat64m8_t 
test_vfwnmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmsac_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_mu( @@ -508,7 +508,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwnmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return vfwnmsac_vf_f32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_mu( @@ -517,7 +517,7 @@ vfloat32mf2_t test_vfwnmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, _Floa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmsac_vv_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_mu( @@ -526,7 +526,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwnmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return vfwnmsac_vf_f32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_mu( @@ -535,7 +535,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmsac_vv_f32m2_mu(mask, vd, vs1, vs2, vl); + 
return __riscv_vfwnmsac_vv_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_mu( @@ -544,7 +544,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwnmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return vfwnmsac_vf_f32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_mu( @@ -553,7 +553,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmsac_vv_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_mu( @@ -562,7 +562,7 @@ vfloat32m4_t test_vfwnmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwnmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return vfwnmsac_vf_f32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_mu( @@ -571,7 +571,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmsac_vv_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_mu( @@ -580,7 +580,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwnmsac_vf_f32m8_mu(vbool4_t mask, 
vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return vfwnmsac_vf_f32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_mu( @@ -589,7 +589,7 @@ vfloat32m8_t test_vfwnmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmsac_vv_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_mu( @@ -598,7 +598,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return vfwnmsac_vf_f64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_mu( @@ -607,7 +607,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmsac_vv_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_mu( @@ -616,7 +616,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return vfwnmsac_vf_f64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_mu( @@ -625,7 +625,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_mu(vbool32_t 
mask, vfloat64m2_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmsac_vv_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_mu( @@ -634,7 +634,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return vfwnmsac_vf_f64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_mu( @@ -643,7 +643,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float vs // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmsac_vv_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vv_f64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_mu( @@ -652,6 +652,6 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return vfwnmsac_vf_f64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vfwnmsac_vf_f64m8_mu(mask, vd, vs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwredosum.c index 27694126314a..858200ce05af 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwredosum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwredosum.c @@ 
-13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf4_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16mf4_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_tu( @@ -22,7 +22,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf2_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16mf2_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_tu( @@ -31,7 +31,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m1_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m1_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_tu( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m2_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m2_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_tu( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tu(vfloat32m1_t 
maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m4_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m4_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_tu( @@ -58,7 +58,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m8_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m8_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tu( @@ -67,7 +67,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32mf2_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f32mf2_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_tu( @@ -76,7 +76,7 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m1_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m1_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_tu( @@ -85,7 +85,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m2_f64m1_tu(maskedoff, vector, 
scalar, vl); + return __riscv_vfwredosum_vs_f32m2_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_tu( @@ -94,7 +94,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m4_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m4_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_tu( @@ -103,7 +103,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m8_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m8_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_tum( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf4_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16mf4_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_tum( @@ -121,7 +121,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tum(vbool64_t mask, vfloat32m1_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return 
__riscv_vfwredosum_vs_f16mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_tum( @@ -130,7 +130,7 @@ vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tum(vbool32_t mask, vfloat32m1_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_tum( @@ -139,7 +139,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tum(vbool16_t mask, vfloat32m1_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_tum( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tum(vbool8_t mask, vfloat32m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f16m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_tum( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tum(vbool4_t mask, vfloat32m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tum(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredosum_vs_f16m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); + 
return __riscv_vfwredosum_vs_f16m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tum( @@ -166,7 +166,7 @@ vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tum(vbool2_t mask, vfloat32m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32mf2_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f32mf2_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_tum( @@ -175,7 +175,7 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_tum( @@ -184,7 +184,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tum(vbool32_t mask, vfloat64m1_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_tum( @@ -193,7 +193,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tum(vbool16_t mask, vfloat64m1_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m4_f64m1_tum(mask, maskedoff, vector, 
scalar, vl); + return __riscv_vfwredosum_vs_f32m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_tum( @@ -202,6 +202,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tum(vbool8_t mask, vfloat64m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tum(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredosum_vs_f32m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredosum_vs_f32m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwredusum.c index e2600e527e65..a833a2942d4b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwredusum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwredusum.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf4_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16mf4_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_tu( @@ -22,7 +22,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf2_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16mf2_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_tu( @@ -31,7 +31,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m1_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m1_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_tu( @@ -40,7 +40,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m2_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m2_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_tu( @@ -49,7 +49,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m4_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m4_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_tu( @@ -58,7 +58,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m8_f32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m8_f32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tu( @@ -67,7 +67,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t vector, 
vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32mf2_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f32mf2_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_tu( @@ -76,7 +76,7 @@ vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m1_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m1_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_tu( @@ -85,7 +85,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m2_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m2_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_tu( @@ -94,7 +94,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m4_f64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m4_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_tu( @@ -103,7 +103,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m8_f64m1_tu(maskedoff, vector, scalar, vl); + return 
__riscv_vfwredusum_vs_f32m8_f64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_tum( @@ -112,7 +112,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf4_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16mf4_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_tum( @@ -121,7 +121,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tum(vbool64_t mask, vfloat32m1_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_tum( @@ -130,7 +130,7 @@ vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tum(vbool32_t mask, vfloat32m1_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_tum( @@ -139,7 +139,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tum(vbool16_t mask, vfloat32m1_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m2_f32m1_tum(mask, maskedoff, vector, scalar, 
vl); + return __riscv_vfwredusum_vs_f16m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_tum( @@ -148,7 +148,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tum(vbool8_t mask, vfloat32m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_tum( @@ -157,7 +157,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tum(vbool4_t mask, vfloat32m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tum(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return vfwredusum_vs_f16m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f16m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tum( @@ -166,7 +166,7 @@ vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tum(vbool2_t mask, vfloat32m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32mf2_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f32mf2_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_tum( @@ -175,7 +175,7 @@ vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t ma // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m1_f64m1_tum(mask, maskedoff, 
vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_tum( @@ -184,7 +184,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tum(vbool32_t mask, vfloat64m1_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_tum( @@ -193,7 +193,7 @@ vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tum(vbool16_t mask, vfloat64m1_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_tum( @@ -202,6 +202,6 @@ vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tum(vbool8_t mask, vfloat64m1_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tum(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return vfwredusum_vs_f32m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vfwredusum_vs_f32m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwsub.c index 0498d18829e7..7397cd098029 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwsub.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwsub.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_vv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_tu( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_tu( @@ -31,7 +31,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_wv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_tu( @@ -40,7 +40,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_tu( @@ -49,7 +49,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_vv_f32m1_tu(maskedoff, op1, op2, vl); + return 
__riscv_vfwsub_vv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_tu( @@ -58,7 +58,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_tu( @@ -67,7 +67,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_wv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_vv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return 
vfwsub_vf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_tu( @@ -103,7 +103,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_wv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_tu( @@ -112,7 +112,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_tu( @@ -121,7 +121,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_vv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_tu( @@ -130,7 +130,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_tu( @@ -139,7 +139,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wv_f32m4_tu(vfloat32m4_t maskedoff, 
vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_wv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_tu( @@ -148,7 +148,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_tu( @@ -157,7 +157,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_vv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_tu( @@ -166,7 +166,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_tu( @@ -175,7 +175,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_wv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_tu( @@ -184,7 +184,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m8_t test_vfwsub_wf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tu( @@ -193,7 +193,7 @@ vfloat32m8_t test_vfwsub_wf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, _ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_vv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tu( @@ -202,7 +202,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_wv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t maskedoff, 
vfloat64m1_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_vv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_tu( @@ -238,7 +238,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_wv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_tu( @@ -256,7 +256,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_tu( @@ -265,7 +265,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_vv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_tu( @@ -274,7 +274,7 @@ vfloat64m4_t 
test_vfwsub_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_tu( @@ -283,7 +283,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_wv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_tu( @@ -292,7 +292,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_tu( @@ -301,7 +301,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_vv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_tu( @@ -310,7 +310,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwsub_wv_f64m8_tu( @@ -319,7 +319,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_wv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_tu( @@ -328,7 +328,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_tum( @@ -337,7 +337,7 @@ vfloat64m8_t test_vfwsub_wf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, f // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_tum( @@ -346,7 +346,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_tum( @@ -355,7 +355,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, 
size_t vl) { - return vfwsub_wv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_tum( @@ -364,7 +364,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_tum( @@ -373,7 +373,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_tum( @@ -382,7 +382,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_tum( @@ -391,7 +391,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_wv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_tum( @@ -400,7 +400,7 @@ 
vfloat32m1_t test_vfwsub_wv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_tum( @@ -409,7 +409,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_tum( @@ -418,7 +418,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_tum( @@ -427,7 +427,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_wv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_tum( @@ -436,7 +436,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, 
size_t vl) { - return vfwsub_wf_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_tum( @@ -445,7 +445,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_tum( @@ -454,7 +454,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_tum( @@ -463,7 +463,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_wv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_tum( @@ -472,7 +472,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_tum( @@ -481,7 +481,7 @@ vfloat32m4_t 
test_vfwsub_wf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_tum( @@ -490,7 +490,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_tum( @@ -499,7 +499,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_wv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_tum( @@ -508,7 +508,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tum( @@ -517,7 +517,7 @@ vfloat32m8_t test_vfwsub_wf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - 
return vfwsub_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tum( @@ -526,7 +526,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tum( @@ -535,7 +535,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_wv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tum( @@ -544,7 +544,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_tum( @@ -553,7 +553,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_tum( @@ -562,7 +562,7 @@ vfloat64m2_t 
test_vfwsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_tum( @@ -571,7 +571,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_wv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_tum( @@ -580,7 +580,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_tum( @@ -589,7 +589,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_tum( @@ -598,7 +598,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return 
vfwsub_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_tum( @@ -607,7 +607,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_wv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_tum( @@ -616,7 +616,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_tum( @@ -625,7 +625,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_tum( @@ -634,7 +634,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_tum( @@ -643,7 +643,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8_tum(vbool8_t 
mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_wv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_tum( @@ -652,7 +652,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_tumu( @@ -661,7 +661,7 @@ vfloat64m8_t test_vfwsub_wf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_tumu( @@ -670,7 +670,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_tumu( @@ -679,7 +679,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return 
vfwsub_wv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_tumu( @@ -688,7 +688,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_tumu( @@ -697,7 +697,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_tumu( @@ -706,7 +706,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_tumu( @@ -715,7 +715,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_wv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_tumu( @@ -724,7 +724,7 @@ 
vfloat32m1_t test_vfwsub_wv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_tumu( @@ -733,7 +733,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_tumu( @@ -742,7 +742,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_tumu( @@ -751,7 +751,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_wv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_tumu( @@ -760,7 +760,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, 
_Float16 op2, size_t vl) { - return vfwsub_wf_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_tumu( @@ -769,7 +769,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_tumu( @@ -778,7 +778,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_tumu( @@ -787,7 +787,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_wv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_tumu( @@ -796,7 +796,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_tumu( @@ -805,7 
+805,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_tumu( @@ -814,7 +814,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_tumu( @@ -823,7 +823,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_wv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_tumu( @@ -832,7 +832,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_tumu( @@ -841,7 +841,7 @@ vfloat32m8_t test_vfwsub_wf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t 
op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_tumu( @@ -850,7 +850,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_tumu( @@ -859,7 +859,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_wv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_tumu( @@ -868,7 +868,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_tumu( @@ -877,7 +877,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_tumu( 
@@ -886,7 +886,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_tumu( @@ -895,7 +895,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_wv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_tumu( @@ -904,7 +904,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_tumu( @@ -913,7 +913,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_tumu( @@ -922,7 +922,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, 
vfloat32m2_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_tumu( @@ -931,7 +931,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_wv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_tumu( @@ -940,7 +940,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_tumu( @@ -949,7 +949,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_tumu( @@ -958,7 +958,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwsub_wv_f64m8_tumu( @@ -967,7 +967,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_wv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_tumu( @@ -976,7 +976,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_mu( @@ -985,7 +985,7 @@ vfloat64m8_t test_vfwsub_wf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_mu( @@ -994,7 +994,7 @@ vfloat32mf2_t test_vfwsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_mu( @@ -1003,7 +1003,7 @@ vfloat32mf2_t test_vfwsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wv_f32mf2_mu(vbool64_t mask, 
vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_wv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_mu( @@ -1012,7 +1012,7 @@ vfloat32mf2_t test_vfwsub_wv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwsub_wf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_mu( @@ -1021,7 +1021,7 @@ vfloat32mf2_t test_vfwsub_wf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_mu( @@ -1030,7 +1030,7 @@ vfloat32m1_t test_vfwsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_mu( @@ -1039,7 +1039,7 @@ vfloat32m1_t test_vfwsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_wv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vfwsub_wf_f32m1_mu( @@ -1048,7 +1048,7 @@ vfloat32m1_t test_vfwsub_wv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwsub_wf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_mu( @@ -1057,7 +1057,7 @@ vfloat32m1_t test_vfwsub_wf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_mu( @@ -1066,7 +1066,7 @@ vfloat32m2_t test_vfwsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_mu( @@ -1075,7 +1075,7 @@ vfloat32m2_t test_vfwsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_wv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_mu( @@ -1084,7 +1084,7 @@ vfloat32m2_t test_vfwsub_wv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwsub_wf_f32m2_mu(vbool16_t mask, vfloat32m2_t 
maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_mu( @@ -1093,7 +1093,7 @@ vfloat32m2_t test_vfwsub_wf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_mu( @@ -1102,7 +1102,7 @@ vfloat32m4_t test_vfwsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_mu( @@ -1111,7 +1111,7 @@ vfloat32m4_t test_vfwsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_wv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_mu( @@ -1120,7 +1120,7 @@ vfloat32m4_t test_vfwsub_wv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwsub_wf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_mu( @@ -1129,7 
+1129,7 @@ vfloat32m4_t test_vfwsub_wf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_mu( @@ -1138,7 +1138,7 @@ vfloat32m8_t test_vfwsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_mu( @@ -1147,7 +1147,7 @@ vfloat32m8_t test_vfwsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_wv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_mu( @@ -1156,7 +1156,7 @@ vfloat32m8_t test_vfwsub_wv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwsub_wf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_mu( @@ -1165,7 +1165,7 @@ vfloat32m8_t test_vfwsub_wf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, 
size_t vl) { - return vfwsub_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_mu( @@ -1174,7 +1174,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_mu( @@ -1183,7 +1183,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return vfwsub_wv_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_mu( @@ -1192,7 +1192,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_mu( @@ -1201,7 +1201,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_mu( @@ -1210,7 +1210,7 @@ vfloat64m2_t 
test_vfwsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_mu( @@ -1219,7 +1219,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_wv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_mu( @@ -1228,7 +1228,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_mu( @@ -1237,7 +1237,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_mu( @@ -1246,7 +1246,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return 
vfwsub_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_mu( @@ -1255,7 +1255,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_wv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_mu( @@ -1264,7 +1264,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_mu( @@ -1273,7 +1273,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_mu( @@ -1282,7 +1282,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_mu( @@ -1291,7 +1291,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8_mu(vbool8_t mask, 
vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_wv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_mu( @@ -1300,6 +1300,6 @@ vfloat64m8_t test_vfwsub_wv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vfwsub_wf_f64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vid.c index bf6f3e721fd6..e8b3f7f563aa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vid.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vid.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vid_v_u8mf8_tu(vuint8mf8_t maskedoff, size_t vl) { - return vid_v_u8mf8_tu(maskedoff, vl); + return __riscv_vid_v_u8mf8_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf4_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vid_v_u8mf8_tu(vuint8mf8_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vid_v_u8mf4_tu(vuint8mf4_t maskedoff, size_t vl) { - return vid_v_u8mf4_tu(maskedoff, vl); + return __riscv_vid_v_u8mf4_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf2_tu( @@ -30,7 +30,7 @@ vuint8mf4_t test_vid_v_u8mf4_tu(vuint8mf4_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vid_v_u8mf2_tu(vuint8mf2_t maskedoff, size_t vl) { - return vid_v_u8mf2_tu(maskedoff, vl); + return __riscv_vid_v_u8mf2_tu(maskedoff, 
vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m1_tu( @@ -39,7 +39,7 @@ vuint8mf2_t test_vid_v_u8mf2_tu(vuint8mf2_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vid_v_u8m1_tu(vuint8m1_t maskedoff, size_t vl) { - return vid_v_u8m1_tu(maskedoff, vl); + return __riscv_vid_v_u8m1_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m2_tu( @@ -48,7 +48,7 @@ vuint8m1_t test_vid_v_u8m1_tu(vuint8m1_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vid_v_u8m2_tu(vuint8m2_t maskedoff, size_t vl) { - return vid_v_u8m2_tu(maskedoff, vl); + return __riscv_vid_v_u8m2_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m4_tu( @@ -57,7 +57,7 @@ vuint8m2_t test_vid_v_u8m2_tu(vuint8m2_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vid_v_u8m4_tu(vuint8m4_t maskedoff, size_t vl) { - return vid_v_u8m4_tu(maskedoff, vl); + return __riscv_vid_v_u8m4_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m8_tu( @@ -66,7 +66,7 @@ vuint8m4_t test_vid_v_u8m4_tu(vuint8m4_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vid_v_u8m8_tu(vuint8m8_t maskedoff, size_t vl) { - return vid_v_u8m8_tu(maskedoff, vl); + return __riscv_vid_v_u8m8_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf4_tu( @@ -75,7 +75,7 @@ vuint8m8_t test_vid_v_u8m8_tu(vuint8m8_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vid_v_u16mf4_tu(vuint16mf4_t maskedoff, size_t vl) { - return vid_v_u16mf4_tu(maskedoff, vl); + return __riscv_vid_v_u16mf4_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf2_tu( @@ -84,7 +84,7 @@ vuint16mf4_t test_vid_v_u16mf4_tu(vuint16mf4_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vid_v_u16mf2_tu(vuint16mf2_t maskedoff, size_t vl) { - return vid_v_u16mf2_tu(maskedoff, vl); + return __riscv_vid_v_u16mf2_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m1_tu( @@ -93,7 +93,7 @@ vuint16mf2_t 
test_vid_v_u16mf2_tu(vuint16mf2_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vid_v_u16m1_tu(vuint16m1_t maskedoff, size_t vl) { - return vid_v_u16m1_tu(maskedoff, vl); + return __riscv_vid_v_u16m1_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m2_tu( @@ -102,7 +102,7 @@ vuint16m1_t test_vid_v_u16m1_tu(vuint16m1_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vid_v_u16m2_tu(vuint16m2_t maskedoff, size_t vl) { - return vid_v_u16m2_tu(maskedoff, vl); + return __riscv_vid_v_u16m2_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m4_tu( @@ -111,7 +111,7 @@ vuint16m2_t test_vid_v_u16m2_tu(vuint16m2_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vid_v_u16m4_tu(vuint16m4_t maskedoff, size_t vl) { - return vid_v_u16m4_tu(maskedoff, vl); + return __riscv_vid_v_u16m4_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m8_tu( @@ -120,7 +120,7 @@ vuint16m4_t test_vid_v_u16m4_tu(vuint16m4_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vid_v_u16m8_tu(vuint16m8_t maskedoff, size_t vl) { - return vid_v_u16m8_tu(maskedoff, vl); + return __riscv_vid_v_u16m8_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32mf2_tu( @@ -129,7 +129,7 @@ vuint16m8_t test_vid_v_u16m8_tu(vuint16m8_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t maskedoff, size_t vl) { - return vid_v_u32mf2_tu(maskedoff, vl); + return __riscv_vid_v_u32mf2_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m1_tu( @@ -138,7 +138,7 @@ vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vid_v_u32m1_tu(vuint32m1_t maskedoff, size_t vl) { - return vid_v_u32m1_tu(maskedoff, vl); + return __riscv_vid_v_u32m1_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m2_tu( @@ -147,7 +147,7 @@ vuint32m1_t test_vid_v_u32m1_tu(vuint32m1_t 
maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vid_v_u32m2_tu(vuint32m2_t maskedoff, size_t vl) { - return vid_v_u32m2_tu(maskedoff, vl); + return __riscv_vid_v_u32m2_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m4_tu( @@ -156,7 +156,7 @@ vuint32m2_t test_vid_v_u32m2_tu(vuint32m2_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vid_v_u32m4_tu(vuint32m4_t maskedoff, size_t vl) { - return vid_v_u32m4_tu(maskedoff, vl); + return __riscv_vid_v_u32m4_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m8_tu( @@ -165,7 +165,7 @@ vuint32m4_t test_vid_v_u32m4_tu(vuint32m4_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vid_v_u32m8_tu(vuint32m8_t maskedoff, size_t vl) { - return vid_v_u32m8_tu(maskedoff, vl); + return __riscv_vid_v_u32m8_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m1_tu( @@ -174,7 +174,7 @@ vuint32m8_t test_vid_v_u32m8_tu(vuint32m8_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vid_v_u64m1_tu(vuint64m1_t maskedoff, size_t vl) { - return vid_v_u64m1_tu(maskedoff, vl); + return __riscv_vid_v_u64m1_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m2_tu( @@ -183,7 +183,7 @@ vuint64m1_t test_vid_v_u64m1_tu(vuint64m1_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vid_v_u64m2_tu(vuint64m2_t maskedoff, size_t vl) { - return vid_v_u64m2_tu(maskedoff, vl); + return __riscv_vid_v_u64m2_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m4_tu( @@ -192,7 +192,7 @@ vuint64m2_t test_vid_v_u64m2_tu(vuint64m2_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vid_v_u64m4_tu(vuint64m4_t maskedoff, size_t vl) { - return vid_v_u64m4_tu(maskedoff, vl); + return __riscv_vid_v_u64m4_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m8_tu( @@ -201,7 +201,7 @@ vuint64m4_t test_vid_v_u64m4_tu(vuint64m4_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m8_t test_vid_v_u64m8_tu(vuint64m8_t maskedoff, size_t vl) { - return vid_v_u64m8_tu(maskedoff, vl); + return __riscv_vid_v_u64m8_tu(maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf8_tum( @@ -210,7 +210,7 @@ vuint64m8_t test_vid_v_u64m8_tu(vuint64m8_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vid_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) { - return vid_v_u8mf8_tum(mask, maskedoff, vl); + return __riscv_vid_v_u8mf8_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf4_tum( @@ -219,7 +219,7 @@ vuint8mf8_t test_vid_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vid_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl) { - return vid_v_u8mf4_tum(mask, maskedoff, vl); + return __riscv_vid_v_u8mf4_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf2_tum( @@ -228,7 +228,7 @@ vuint8mf4_t test_vid_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vid_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) { - return vid_v_u8mf2_tum(mask, maskedoff, vl); + return __riscv_vid_v_u8mf2_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m1_tum( @@ -237,7 +237,7 @@ vuint8mf2_t test_vid_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vid_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { - return vid_v_u8m1_tum(mask, maskedoff, vl); + return __riscv_vid_v_u8m1_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m2_tum( @@ -246,7 +246,7 @@ vuint8m1_t test_vid_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vid_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { - return vid_v_u8m2_tum(mask, maskedoff, vl); + return __riscv_vid_v_u8m2_tum(mask, maskedoff, vl); } // 
CHECK-RV64-LABEL: @test_vid_v_u8m4_tum( @@ -255,7 +255,7 @@ vuint8m2_t test_vid_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vid_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { - return vid_v_u8m4_tum(mask, maskedoff, vl); + return __riscv_vid_v_u8m4_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m8_tum( @@ -264,7 +264,7 @@ vuint8m4_t test_vid_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vid_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { - return vid_v_u8m8_tum(mask, maskedoff, vl); + return __riscv_vid_v_u8m8_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf4_tum( @@ -273,7 +273,7 @@ vuint8m8_t test_vid_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vid_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) { - return vid_v_u16mf4_tum(mask, maskedoff, vl); + return __riscv_vid_v_u16mf4_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf2_tum( @@ -282,7 +282,7 @@ vuint16mf4_t test_vid_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vid_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, size_t vl) { - return vid_v_u16mf2_tum(mask, maskedoff, vl); + return __riscv_vid_v_u16mf2_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m1_tum( @@ -291,7 +291,7 @@ vuint16mf2_t test_vid_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vid_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) { - return vid_v_u16m1_tum(mask, maskedoff, vl); + return __riscv_vid_v_u16m1_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m2_tum( @@ -300,7 +300,7 @@ vuint16m1_t test_vid_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, size_t v // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vid_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) { - return vid_v_u16m2_tum(mask, maskedoff, vl); + return __riscv_vid_v_u16m2_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m4_tum( @@ -309,7 +309,7 @@ vuint16m2_t test_vid_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vid_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) { - return vid_v_u16m4_tum(mask, maskedoff, vl); + return __riscv_vid_v_u16m4_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m8_tum( @@ -318,7 +318,7 @@ vuint16m4_t test_vid_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vid_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) { - return vid_v_u16m8_tum(mask, maskedoff, vl); + return __riscv_vid_v_u16m8_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32mf2_tum( @@ -327,7 +327,7 @@ vuint16m8_t test_vid_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vid_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) { - return vid_v_u32mf2_tum(mask, maskedoff, vl); + return __riscv_vid_v_u32mf2_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m1_tum( @@ -336,7 +336,7 @@ vuint32mf2_t test_vid_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vid_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) { - return vid_v_u32m1_tum(mask, maskedoff, vl); + return __riscv_vid_v_u32m1_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m2_tum( @@ -345,7 +345,7 @@ vuint32m1_t test_vid_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vid_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) { - return 
vid_v_u32m2_tum(mask, maskedoff, vl); + return __riscv_vid_v_u32m2_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m4_tum( @@ -354,7 +354,7 @@ vuint32m2_t test_vid_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vid_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) { - return vid_v_u32m4_tum(mask, maskedoff, vl); + return __riscv_vid_v_u32m4_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m8_tum( @@ -363,7 +363,7 @@ vuint32m4_t test_vid_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vid_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) { - return vid_v_u32m8_tum(mask, maskedoff, vl); + return __riscv_vid_v_u32m8_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m1_tum( @@ -372,7 +372,7 @@ vuint32m8_t test_vid_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vid_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) { - return vid_v_u64m1_tum(mask, maskedoff, vl); + return __riscv_vid_v_u64m1_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m2_tum( @@ -381,7 +381,7 @@ vuint64m1_t test_vid_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vid_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) { - return vid_v_u64m2_tum(mask, maskedoff, vl); + return __riscv_vid_v_u64m2_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m4_tum( @@ -390,7 +390,7 @@ vuint64m2_t test_vid_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vid_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) { - return vid_v_u64m4_tum(mask, maskedoff, vl); + return __riscv_vid_v_u64m4_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m8_tum( @@ -399,7 
+399,7 @@ vuint64m4_t test_vid_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vid_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) { - return vid_v_u64m8_tum(mask, maskedoff, vl); + return __riscv_vid_v_u64m8_tum(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf8_tumu( @@ -408,7 +408,7 @@ vuint64m8_t test_vid_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vid_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) { - return vid_v_u8mf8_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u8mf8_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf4_tumu( @@ -417,7 +417,7 @@ vuint8mf8_t test_vid_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vid_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl) { - return vid_v_u8mf4_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u8mf4_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf2_tumu( @@ -426,7 +426,7 @@ vuint8mf4_t test_vid_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vid_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) { - return vid_v_u8mf2_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u8mf2_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m1_tumu( @@ -435,7 +435,7 @@ vuint8mf2_t test_vid_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vid_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { - return vid_v_u8m1_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u8m1_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m2_tumu( @@ -444,7 +444,7 @@ vuint8m1_t test_vid_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vid_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { - return vid_v_u8m2_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u8m2_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m4_tumu( @@ -453,7 +453,7 @@ vuint8m2_t test_vid_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vid_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { - return vid_v_u8m4_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u8m4_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m8_tumu( @@ -462,7 +462,7 @@ vuint8m4_t test_vid_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vid_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { - return vid_v_u8m8_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u8m8_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf4_tumu( @@ -471,7 +471,7 @@ vuint8m8_t test_vid_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vid_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) { - return vid_v_u16mf4_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u16mf4_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf2_tumu( @@ -480,7 +480,7 @@ vuint16mf4_t test_vid_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vid_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, size_t vl) { - return vid_v_u16mf2_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u16mf2_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m1_tumu( @@ -489,7 +489,7 @@ vuint16mf2_t test_vid_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vid_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) { - return vid_v_u16m1_tumu(mask, maskedoff, vl); + return 
__riscv_vid_v_u16m1_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m2_tumu( @@ -498,7 +498,7 @@ vuint16m1_t test_vid_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vid_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) { - return vid_v_u16m2_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u16m2_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m4_tumu( @@ -507,7 +507,7 @@ vuint16m2_t test_vid_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vid_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) { - return vid_v_u16m4_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u16m4_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m8_tumu( @@ -516,7 +516,7 @@ vuint16m4_t test_vid_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vid_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) { - return vid_v_u16m8_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u16m8_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32mf2_tumu( @@ -525,7 +525,7 @@ vuint16m8_t test_vid_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) { - return vid_v_u32mf2_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u32mf2_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m1_tumu( @@ -534,7 +534,7 @@ vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vid_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) { - return vid_v_u32m1_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u32m1_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m2_tumu( @@ -543,7 +543,7 @@ vuint32m1_t 
test_vid_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vid_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) { - return vid_v_u32m2_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u32m2_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m4_tumu( @@ -552,7 +552,7 @@ vuint32m2_t test_vid_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vid_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) { - return vid_v_u32m4_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u32m4_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m8_tumu( @@ -561,7 +561,7 @@ vuint32m4_t test_vid_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vid_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) { - return vid_v_u32m8_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u32m8_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m1_tumu( @@ -570,7 +570,7 @@ vuint32m8_t test_vid_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vid_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) { - return vid_v_u64m1_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u64m1_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m2_tumu( @@ -579,7 +579,7 @@ vuint64m1_t test_vid_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vid_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) { - return vid_v_u64m2_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u64m2_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m4_tumu( @@ -588,7 +588,7 @@ vuint64m2_t test_vid_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vid_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) { - return vid_v_u64m4_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u64m4_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m8_tumu( @@ -597,7 +597,7 @@ vuint64m4_t test_vid_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vid_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) { - return vid_v_u64m8_tumu(mask, maskedoff, vl); + return __riscv_vid_v_u64m8_tumu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf8_mu( @@ -606,7 +606,7 @@ vuint64m8_t test_vid_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vid_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) { - return vid_v_u8mf8_mu(mask, maskedoff, vl); + return __riscv_vid_v_u8mf8_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf4_mu( @@ -615,7 +615,7 @@ vuint8mf8_t test_vid_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vid_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl) { - return vid_v_u8mf4_mu(mask, maskedoff, vl); + return __riscv_vid_v_u8mf4_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8mf2_mu( @@ -624,7 +624,7 @@ vuint8mf4_t test_vid_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vid_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) { - return vid_v_u8mf2_mu(mask, maskedoff, vl); + return __riscv_vid_v_u8mf2_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m1_mu( @@ -633,7 +633,7 @@ vuint8mf2_t test_vid_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vid_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { - return vid_v_u8m1_mu(mask, maskedoff, vl); + return __riscv_vid_v_u8m1_mu(mask, 
maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m2_mu( @@ -642,7 +642,7 @@ vuint8m1_t test_vid_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vid_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { - return vid_v_u8m2_mu(mask, maskedoff, vl); + return __riscv_vid_v_u8m2_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m4_mu( @@ -651,7 +651,7 @@ vuint8m2_t test_vid_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vid_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { - return vid_v_u8m4_mu(mask, maskedoff, vl); + return __riscv_vid_v_u8m4_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u8m8_mu( @@ -660,7 +660,7 @@ vuint8m4_t test_vid_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vid_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { - return vid_v_u8m8_mu(mask, maskedoff, vl); + return __riscv_vid_v_u8m8_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf4_mu( @@ -669,7 +669,7 @@ vuint8m8_t test_vid_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vid_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) { - return vid_v_u16mf4_mu(mask, maskedoff, vl); + return __riscv_vid_v_u16mf4_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16mf2_mu( @@ -678,7 +678,7 @@ vuint16mf4_t test_vid_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vid_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, size_t vl) { - return vid_v_u16mf2_mu(mask, maskedoff, vl); + return __riscv_vid_v_u16mf2_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m1_mu( @@ -687,7 +687,7 @@ vuint16mf2_t test_vid_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, size_t // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint16m1_t test_vid_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) { - return vid_v_u16m1_mu(mask, maskedoff, vl); + return __riscv_vid_v_u16m1_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m2_mu( @@ -696,7 +696,7 @@ vuint16m1_t test_vid_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vid_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) { - return vid_v_u16m2_mu(mask, maskedoff, vl); + return __riscv_vid_v_u16m2_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m4_mu( @@ -705,7 +705,7 @@ vuint16m2_t test_vid_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vid_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) { - return vid_v_u16m4_mu(mask, maskedoff, vl); + return __riscv_vid_v_u16m4_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u16m8_mu( @@ -714,7 +714,7 @@ vuint16m4_t test_vid_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vid_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) { - return vid_v_u16m8_mu(mask, maskedoff, vl); + return __riscv_vid_v_u16m8_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32mf2_mu( @@ -723,7 +723,7 @@ vuint16m8_t test_vid_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vid_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) { - return vid_v_u32mf2_mu(mask, maskedoff, vl); + return __riscv_vid_v_u32mf2_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m1_mu( @@ -732,7 +732,7 @@ vuint32mf2_t test_vid_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vid_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) { - return vid_v_u32m1_mu(mask, maskedoff, vl); + return 
__riscv_vid_v_u32m1_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m2_mu( @@ -741,7 +741,7 @@ vuint32m1_t test_vid_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vid_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) { - return vid_v_u32m2_mu(mask, maskedoff, vl); + return __riscv_vid_v_u32m2_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m4_mu( @@ -750,7 +750,7 @@ vuint32m2_t test_vid_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vid_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) { - return vid_v_u32m4_mu(mask, maskedoff, vl); + return __riscv_vid_v_u32m4_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u32m8_mu( @@ -759,7 +759,7 @@ vuint32m4_t test_vid_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vid_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) { - return vid_v_u32m8_mu(mask, maskedoff, vl); + return __riscv_vid_v_u32m8_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m1_mu( @@ -768,7 +768,7 @@ vuint32m8_t test_vid_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vid_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) { - return vid_v_u64m1_mu(mask, maskedoff, vl); + return __riscv_vid_v_u64m1_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m2_mu( @@ -777,7 +777,7 @@ vuint64m1_t test_vid_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vid_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) { - return vid_v_u64m2_mu(mask, maskedoff, vl); + return __riscv_vid_v_u64m2_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m4_mu( @@ -786,7 +786,7 @@ vuint64m2_t test_vid_v_u64m2_mu(vbool32_t mask, vuint64m2_t 
maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vid_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) { - return vid_v_u64m4_mu(mask, maskedoff, vl); + return __riscv_vid_v_u64m4_mu(mask, maskedoff, vl); } // CHECK-RV64-LABEL: @test_vid_v_u64m8_mu( @@ -795,6 +795,6 @@ vuint64m4_t test_vid_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vid_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) { - return vid_v_u64m8_mu(mask, maskedoff, vl); + return __riscv_vid_v_u64m8_mu(mask, maskedoff, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/viota.c index 9dccf2c3dce1..eea62c69894b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/viota.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/viota.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_viota_m_u8mf8_tu(vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u8mf8_tu(maskedoff, op1, vl); + return __riscv_viota_m_u8mf8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf4_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_viota_m_u8mf8_tu(vuint8mf8_t maskedoff, vbool64_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_viota_m_u8mf4_tu(vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u8mf4_tu(maskedoff, op1, vl); + return __riscv_viota_m_u8mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf2_tu( @@ -30,7 +30,7 @@ vuint8mf4_t test_viota_m_u8mf4_tu(vuint8mf4_t maskedoff, vbool32_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_viota_m_u8mf2_tu(vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u8mf2_tu(maskedoff, op1, vl); + return __riscv_viota_m_u8mf2_tu(maskedoff, op1, vl); } // 
CHECK-RV64-LABEL: @test_viota_m_u8m1_tu( @@ -39,7 +39,7 @@ vuint8mf2_t test_viota_m_u8mf2_tu(vuint8mf2_t maskedoff, vbool16_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_viota_m_u8m1_tu(vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u8m1_tu(maskedoff, op1, vl); + return __riscv_viota_m_u8m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m2_tu( @@ -48,7 +48,7 @@ vuint8m1_t test_viota_m_u8m1_tu(vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_viota_m_u8m2_tu(vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u8m2_tu(maskedoff, op1, vl); + return __riscv_viota_m_u8m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m4_tu( @@ -57,7 +57,7 @@ vuint8m2_t test_viota_m_u8m2_tu(vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_viota_m_u8m4_tu(vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { - return viota_m_u8m4_tu(maskedoff, op1, vl); + return __riscv_viota_m_u8m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m8_tu( @@ -66,7 +66,7 @@ vuint8m4_t test_viota_m_u8m4_tu(vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_viota_m_u8m8_tu(vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return viota_m_u8m8_tu(maskedoff, op1, vl); + return __riscv_viota_m_u8m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf4_tu( @@ -75,7 +75,7 @@ vuint8m8_t test_viota_m_u8m8_tu(vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_viota_m_u16mf4_tu(vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u16mf4_tu(maskedoff, op1, vl); + return __riscv_viota_m_u16mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf2_tu( @@ -84,7 +84,7 @@ vuint16mf4_t test_viota_m_u16mf4_tu(vuint16mf4_t maskedoff, vbool64_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint16mf2_t test_viota_m_u16mf2_tu(vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u16mf2_tu(maskedoff, op1, vl); + return __riscv_viota_m_u16mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m1_tu( @@ -93,7 +93,7 @@ vuint16mf2_t test_viota_m_u16mf2_tu(vuint16mf2_t maskedoff, vbool32_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_viota_m_u16m1_tu(vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u16m1_tu(maskedoff, op1, vl); + return __riscv_viota_m_u16m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m2_tu( @@ -102,7 +102,7 @@ vuint16m1_t test_viota_m_u16m1_tu(vuint16m1_t maskedoff, vbool16_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_viota_m_u16m2_tu(vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u16m2_tu(maskedoff, op1, vl); + return __riscv_viota_m_u16m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m4_tu( @@ -111,7 +111,7 @@ vuint16m2_t test_viota_m_u16m2_tu(vuint16m2_t maskedoff, vbool8_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_viota_m_u16m4_tu(vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u16m4_tu(maskedoff, op1, vl); + return __riscv_viota_m_u16m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m8_tu( @@ -120,7 +120,7 @@ vuint16m4_t test_viota_m_u16m4_tu(vuint16m4_t maskedoff, vbool4_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_viota_m_u16m8_tu(vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { - return viota_m_u16m8_tu(maskedoff, op1, vl); + return __riscv_viota_m_u16m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32mf2_tu( @@ -129,7 +129,7 @@ vuint16m8_t test_viota_m_u16m8_tu(vuint16m8_t maskedoff, vbool2_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_viota_m_u32mf2_tu(vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u32mf2_tu(maskedoff, op1, vl); + 
return __riscv_viota_m_u32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m1_tu( @@ -138,7 +138,7 @@ vuint32mf2_t test_viota_m_u32mf2_tu(vuint32mf2_t maskedoff, vbool64_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_viota_m_u32m1_tu(vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u32m1_tu(maskedoff, op1, vl); + return __riscv_viota_m_u32m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m2_tu( @@ -147,7 +147,7 @@ vuint32m1_t test_viota_m_u32m1_tu(vuint32m1_t maskedoff, vbool32_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_viota_m_u32m2_tu(vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u32m2_tu(maskedoff, op1, vl); + return __riscv_viota_m_u32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m4_tu( @@ -156,7 +156,7 @@ vuint32m2_t test_viota_m_u32m2_tu(vuint32m2_t maskedoff, vbool16_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_viota_m_u32m4_tu(vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u32m4_tu(maskedoff, op1, vl); + return __riscv_viota_m_u32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m8_tu( @@ -165,7 +165,7 @@ vuint32m4_t test_viota_m_u32m4_tu(vuint32m4_t maskedoff, vbool8_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_viota_m_u32m8_tu(vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u32m8_tu(maskedoff, op1, vl); + return __riscv_viota_m_u32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m1_tu( @@ -174,7 +174,7 @@ vuint32m8_t test_viota_m_u32m8_tu(vuint32m8_t maskedoff, vbool4_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_viota_m_u64m1_tu(vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u64m1_tu(maskedoff, op1, vl); + return __riscv_viota_m_u64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m2_tu( @@ -183,7 +183,7 @@ vuint64m1_t 
test_viota_m_u64m1_tu(vuint64m1_t maskedoff, vbool64_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_viota_m_u64m2_tu(vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u64m2_tu(maskedoff, op1, vl); + return __riscv_viota_m_u64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m4_tu( @@ -192,7 +192,7 @@ vuint64m2_t test_viota_m_u64m2_tu(vuint64m2_t maskedoff, vbool32_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_viota_m_u64m4_tu(vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u64m4_tu(maskedoff, op1, vl); + return __riscv_viota_m_u64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m8_tu( @@ -201,7 +201,7 @@ vuint64m4_t test_viota_m_u64m4_tu(vuint64m4_t maskedoff, vbool16_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_viota_m_u64m8_tu(vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u64m8_tu(maskedoff, op1, vl); + return __riscv_viota_m_u64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf8_tum( @@ -210,7 +210,7 @@ vuint64m8_t test_viota_m_u64m8_tu(vuint64m8_t maskedoff, vbool8_t op1, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_viota_m_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u8mf8_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8mf8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf4_tum( @@ -219,7 +219,7 @@ vuint8mf8_t test_viota_m_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vbool6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_viota_m_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u8mf4_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf2_tum( @@ -228,7 +228,7 @@ vuint8mf4_t test_viota_m_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vbool3 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_viota_m_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u8mf2_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m1_tum( @@ -237,7 +237,7 @@ vuint8mf2_t test_viota_m_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vbool1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_viota_m_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u8m1_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m2_tum( @@ -246,7 +246,7 @@ vuint8m1_t test_viota_m_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_viota_m_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u8m2_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m4_tum( @@ -255,7 +255,7 @@ vuint8m2_t test_viota_m_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_viota_m_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { - return viota_m_u8m4_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m8_tum( @@ -264,7 +264,7 @@ vuint8m4_t test_viota_m_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_viota_m_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return viota_m_u8m8_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf4_tum( @@ -273,7 +273,7 @@ vuint8m8_t test_viota_m_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t o // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_viota_m_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u16mf4_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf2_tum( @@ -282,7 +282,7 @@ vuint16mf4_t test_viota_m_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vbo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_viota_m_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u16mf2_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m1_tum( @@ -291,7 +291,7 @@ vuint16mf2_t test_viota_m_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vbo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_viota_m_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u16m1_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m2_tum( @@ -300,7 +300,7 @@ vuint16m1_t test_viota_m_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vbool1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_viota_m_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u16m2_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m4_tum( @@ -309,7 +309,7 @@ vuint16m2_t test_viota_m_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vbool8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_viota_m_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u16m4_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m8_tum( @@ -318,7 +318,7 @@ vuint16m4_t test_viota_m_u16m4_tum(vbool4_t mask, vuint16m4_t 
maskedoff, vbool4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_viota_m_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { - return viota_m_u16m8_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32mf2_tum( @@ -327,7 +327,7 @@ vuint16m8_t test_viota_m_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vbool2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_viota_m_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u32mf2_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m1_tum( @@ -336,7 +336,7 @@ vuint32mf2_t test_viota_m_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vbo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_viota_m_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u32m1_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m2_tum( @@ -345,7 +345,7 @@ vuint32m1_t test_viota_m_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vbool3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_viota_m_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u32m2_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m4_tum( @@ -354,7 +354,7 @@ vuint32m2_t test_viota_m_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vbool1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_viota_m_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u32m4_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m8_tum( @@ -363,7 +363,7 @@ vuint32m4_t test_viota_m_u32m4_tum(vbool8_t 
mask, vuint32m4_t maskedoff, vbool8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_viota_m_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u32m8_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m1_tum( @@ -372,7 +372,7 @@ vuint32m8_t test_viota_m_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vbool4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_viota_m_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u64m1_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m2_tum( @@ -381,7 +381,7 @@ vuint64m1_t test_viota_m_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vbool6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_viota_m_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u64m2_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m4_tum( @@ -390,7 +390,7 @@ vuint64m2_t test_viota_m_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vbool3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_viota_m_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u64m4_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m8_tum( @@ -399,7 +399,7 @@ vuint64m4_t test_viota_m_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vbool1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_viota_m_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u64m8_tum(mask, maskedoff, op1, vl); + return __riscv_viota_m_u64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf8_tumu( @@ -408,7 +408,7 @@ vuint64m8_t 
test_viota_m_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vbool8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_viota_m_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u8mf8_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8mf8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf4_tumu( @@ -417,7 +417,7 @@ vuint8mf8_t test_viota_m_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_viota_m_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u8mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf2_tumu( @@ -426,7 +426,7 @@ vuint8mf4_t test_viota_m_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_viota_m_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u8mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m1_tumu( @@ -435,7 +435,7 @@ vuint8mf2_t test_viota_m_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_viota_m_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u8m1_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m2_tumu( @@ -444,7 +444,7 @@ vuint8m1_t test_viota_m_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_viota_m_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u8m2_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m4_tumu( @@ -453,7 
+453,7 @@ vuint8m2_t test_viota_m_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_viota_m_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { - return viota_m_u8m4_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m8_tumu( @@ -462,7 +462,7 @@ vuint8m4_t test_viota_m_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_viota_m_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return viota_m_u8m8_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf4_tumu( @@ -471,7 +471,7 @@ vuint8m8_t test_viota_m_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_viota_m_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u16mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf2_tumu( @@ -480,7 +480,7 @@ vuint16mf4_t test_viota_m_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vb // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_viota_m_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u16mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m1_tumu( @@ -489,7 +489,7 @@ vuint16mf2_t test_viota_m_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vb // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_viota_m_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u16m1_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: 
@test_viota_m_u16m2_tumu( @@ -498,7 +498,7 @@ vuint16m1_t test_viota_m_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_viota_m_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u16m2_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m4_tumu( @@ -507,7 +507,7 @@ vuint16m2_t test_viota_m_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vbool8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_viota_m_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u16m4_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m8_tumu( @@ -516,7 +516,7 @@ vuint16m4_t test_viota_m_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vbool4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_viota_m_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { - return viota_m_u16m8_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32mf2_tumu( @@ -525,7 +525,7 @@ vuint16m8_t test_viota_m_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vbool2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u32mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m1_tumu( @@ -534,7 +534,7 @@ vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vb // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_viota_m_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32m1_tumu(mask, 
maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m2_tumu( @@ -543,7 +543,7 @@ vuint32m1_t test_viota_m_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_viota_m_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m4_tumu( @@ -552,7 +552,7 @@ vuint32m2_t test_viota_m_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_viota_m_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u32m4_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m8_tumu( @@ -561,7 +561,7 @@ vuint32m4_t test_viota_m_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vbool8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_viota_m_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m1_tumu( @@ -570,7 +570,7 @@ vuint32m8_t test_viota_m_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vbool4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_viota_m_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m2_tumu( @@ -579,7 +579,7 @@ vuint64m1_t test_viota_m_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_viota_m_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u64m2_tumu(mask, maskedoff, op1, vl); + return 
__riscv_viota_m_u64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m4_tumu( @@ -588,7 +588,7 @@ vuint64m2_t test_viota_m_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_viota_m_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m8_tumu( @@ -597,7 +597,7 @@ vuint64m4_t test_viota_m_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vbool // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_viota_m_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf8_mu( @@ -606,7 +606,7 @@ vuint64m8_t test_viota_m_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vbool8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_viota_m_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u8mf8_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8mf8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf4_mu( @@ -615,7 +615,7 @@ vuint8mf8_t test_viota_m_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vbool64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_viota_m_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u8mf4_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8mf2_mu( @@ -624,7 +624,7 @@ vuint8mf4_t test_viota_m_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vbool32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_viota_m_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u8mf2_mu(mask, maskedoff, op1, 
vl); + return __riscv_viota_m_u8mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m1_mu( @@ -633,7 +633,7 @@ vuint8mf2_t test_viota_m_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vbool16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_viota_m_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u8m1_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m2_mu( @@ -642,7 +642,7 @@ vuint8m1_t test_viota_m_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_viota_m_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u8m2_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m4_mu( @@ -651,7 +651,7 @@ vuint8m2_t test_viota_m_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_viota_m_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { - return viota_m_u8m4_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u8m8_mu( @@ -660,7 +660,7 @@ vuint8m4_t test_viota_m_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_viota_m_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return viota_m_u8m8_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u8m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf4_mu( @@ -669,7 +669,7 @@ vuint8m8_t test_viota_m_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_viota_m_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u16mf4_mu(mask, maskedoff, op1, vl); + return 
__riscv_viota_m_u16mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16mf2_mu( @@ -678,7 +678,7 @@ vuint16mf4_t test_viota_m_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_viota_m_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u16mf2_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m1_mu( @@ -687,7 +687,7 @@ vuint16mf2_t test_viota_m_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_viota_m_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u16m1_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m2_mu( @@ -696,7 +696,7 @@ vuint16m1_t test_viota_m_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vbool16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_viota_m_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u16m2_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m4_mu( @@ -705,7 +705,7 @@ vuint16m2_t test_viota_m_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_viota_m_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u16m4_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u16m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u16m8_mu( @@ -714,7 +714,7 @@ vuint16m4_t test_viota_m_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_viota_m_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { - return viota_m_u16m8_mu(mask, maskedoff, op1, vl); + return 
__riscv_viota_m_u16m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32mf2_mu( @@ -723,7 +723,7 @@ vuint16m8_t test_viota_m_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_viota_m_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m1_mu( @@ -732,7 +732,7 @@ vuint32mf2_t test_viota_m_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vboo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_viota_m_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u32m1_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m2_mu( @@ -741,7 +741,7 @@ vuint32m1_t test_viota_m_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vbool32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_viota_m_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u32m2_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m4_mu( @@ -750,7 +750,7 @@ vuint32m2_t test_viota_m_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vbool16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_viota_m_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u32m4_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u32m8_mu( @@ -759,7 +759,7 @@ vuint32m4_t test_viota_m_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_viota_m_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { - return viota_m_u32m8_mu(mask, maskedoff, op1, vl); + return 
__riscv_viota_m_u32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m1_mu( @@ -768,7 +768,7 @@ vuint32m8_t test_viota_m_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_viota_m_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return viota_m_u64m1_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m2_mu( @@ -777,7 +777,7 @@ vuint64m1_t test_viota_m_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vbool64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_viota_m_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return viota_m_u64m2_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m4_mu( @@ -786,7 +786,7 @@ vuint64m2_t test_viota_m_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vbool32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_viota_m_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return viota_m_u64m4_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_viota_m_u64m8_mu( @@ -795,6 +795,6 @@ vuint64m4_t test_viota_m_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vbool16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_viota_m_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return viota_m_u64m8_mu(mask, maskedoff, op1, vl); + return __riscv_viota_m_u64m8_mu(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle16.c index 2f40b26dc939..7f96f39b3f1c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vle16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16mf4_tu(maskedoff, base, vl); + return __riscv_vle16_v_f16mf4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vle16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vle16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16mf2_tu(maskedoff, base, vl); + return __riscv_vle16_v_f16mf2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vle16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vle16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m1_tu(maskedoff, base, vl); + return __riscv_vle16_v_f16m1_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vle16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vle16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m2_tu(maskedoff, base, vl); + return __riscv_vle16_v_f16m2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vle16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vle16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m4_tu(maskedoff, base, vl); + return __riscv_vle16_v_f16m4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vle16_v_f16m4_tu(vfloat16m4_t maskedoff, const 
_Float16 *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vle16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m8_tu(maskedoff, base, vl); + return __riscv_vle16_v_f16m8_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf4_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vle16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vle16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16mf4_tu(maskedoff, base, vl); + return __riscv_vle16_v_i16mf4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf2_tu( @@ -76,7 +76,7 @@ vint16mf4_t test_vle16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vle16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16mf2_tu(maskedoff, base, vl); + return __riscv_vle16_v_i16mf2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m1_tu( @@ -85,7 +85,7 @@ vint16mf2_t test_vle16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vle16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m1_tu(maskedoff, base, vl); + return __riscv_vle16_v_i16m1_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m2_tu( @@ -94,7 +94,7 @@ vint16m1_t test_vle16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vle16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m2_tu(maskedoff, base, vl); + return __riscv_vle16_v_i16m2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m4_tu( @@ -103,7 +103,7 @@ vint16m2_t test_vle16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vle16_v_i16m4_tu(vint16m4_t 
maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m4_tu(maskedoff, base, vl); + return __riscv_vle16_v_i16m4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m8_tu( @@ -112,7 +112,7 @@ vint16m4_t test_vle16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vle16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m8_tu(maskedoff, base, vl); + return __riscv_vle16_v_i16m8_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf4_tu( @@ -121,7 +121,7 @@ vint16m8_t test_vle16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vle16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16mf4_tu(maskedoff, base, vl); + return __riscv_vle16_v_u16mf4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf2_tu( @@ -130,7 +130,7 @@ vuint16mf4_t test_vle16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vle16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16mf2_tu(maskedoff, base, vl); + return __riscv_vle16_v_u16mf2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m1_tu( @@ -139,7 +139,7 @@ vuint16mf2_t test_vle16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vle16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m1_tu(maskedoff, base, vl); + return __riscv_vle16_v_u16m1_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m2_tu( @@ -148,7 +148,7 @@ vuint16m1_t test_vle16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vle16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m2_tu(maskedoff, base, 
vl); + return __riscv_vle16_v_u16m2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m4_tu( @@ -157,7 +157,7 @@ vuint16m2_t test_vle16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vle16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m4_tu(maskedoff, base, vl); + return __riscv_vle16_v_u16m4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m8_tu( @@ -166,7 +166,7 @@ vuint16m4_t test_vle16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vle16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m8_tu(maskedoff, base, vl); + return __riscv_vle16_v_u16m8_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16mf4_tum( @@ -175,7 +175,7 @@ vuint16m8_t test_vle16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vle16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16mf4_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16mf4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16mf2_tum( @@ -184,7 +184,7 @@ vfloat16mf4_t test_vle16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vle16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16mf2_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16mf2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m1_tum( @@ -193,7 +193,7 @@ vfloat16mf2_t test_vle16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vle16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m1_tum(mask, 
maskedoff, base, vl); + return __riscv_vle16_v_f16m1_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m2_tum( @@ -202,7 +202,7 @@ vfloat16m1_t test_vle16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vle16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m2_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16m2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m4_tum( @@ -211,7 +211,7 @@ vfloat16m2_t test_vle16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vle16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m4_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16m4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m8_tum( @@ -220,7 +220,7 @@ vfloat16m4_t test_vle16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vle16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m8_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16m8_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf4_tum( @@ -229,7 +229,7 @@ vfloat16m8_t test_vle16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vle16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16mf4_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16mf4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf2_tum( @@ -238,7 +238,7 @@ vint16mf4_t test_vle16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vle16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, 
const int16_t *base, size_t vl) { - return vle16_v_i16mf2_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16mf2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m1_tum( @@ -247,7 +247,7 @@ vint16mf2_t test_vle16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vle16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m1_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16m1_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m2_tum( @@ -256,7 +256,7 @@ vint16m1_t test_vle16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vle16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m2_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16m2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m4_tum( @@ -265,7 +265,7 @@ vint16m2_t test_vle16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vle16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m4_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16m4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m8_tum( @@ -274,7 +274,7 @@ vint16m4_t test_vle16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vle16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m8_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16m8_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf4_tum( @@ -283,7 +283,7 @@ vint16m8_t test_vle16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vle16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16mf4_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16mf4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf2_tum( @@ -292,7 +292,7 @@ vuint16mf4_t test_vle16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vle16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16mf2_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16mf2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m1_tum( @@ -301,7 +301,7 @@ vuint16mf2_t test_vle16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vle16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m1_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16m1_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m2_tum( @@ -310,7 +310,7 @@ vuint16m1_t test_vle16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vle16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m2_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16m2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m4_tum( @@ -319,7 +319,7 @@ vuint16m2_t test_vle16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vle16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m4_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16m4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m8_tum( @@ -328,7 +328,7 @@ vuint16m4_t test_vle16_v_u16m4_tum(vbool4_t mask, vuint16m4_t 
maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vle16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m8_tum(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16m8_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16mf4_tumu( @@ -337,7 +337,7 @@ vuint16m8_t test_vle16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vle16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16mf4_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16mf4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16mf2_tumu( @@ -346,7 +346,7 @@ vfloat16mf4_t test_vle16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vle16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16mf2_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16mf2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m1_tumu( @@ -355,7 +355,7 @@ vfloat16mf2_t test_vle16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vle16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m1_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16m1_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m2_tumu( @@ -364,7 +364,7 @@ vfloat16m1_t test_vle16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vle16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m2_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16m2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: 
@test_vle16_v_f16m4_tumu( @@ -373,7 +373,7 @@ vfloat16m2_t test_vle16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vle16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m4_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16m4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m8_tumu( @@ -382,7 +382,7 @@ vfloat16m4_t test_vle16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vle16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m8_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16m8_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf4_tumu( @@ -391,7 +391,7 @@ vfloat16m8_t test_vle16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vle16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16mf4_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16mf4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf2_tumu( @@ -400,7 +400,7 @@ vint16mf4_t test_vle16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vle16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16mf2_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16mf2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m1_tumu( @@ -409,7 +409,7 @@ vint16mf2_t test_vle16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vle16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m1_tumu(mask, maskedoff, base, vl); + 
return __riscv_vle16_v_i16m1_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m2_tumu( @@ -418,7 +418,7 @@ vint16m1_t test_vle16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vle16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m2_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16m2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m4_tumu( @@ -427,7 +427,7 @@ vint16m2_t test_vle16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vle16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m4_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16m4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m8_tumu( @@ -436,7 +436,7 @@ vint16m4_t test_vle16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vle16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m8_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16m8_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf4_tumu( @@ -445,7 +445,7 @@ vint16m8_t test_vle16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vle16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16mf4_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16mf4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf2_tumu( @@ -454,7 +454,7 @@ vuint16mf4_t test_vle16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vle16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t 
*base, size_t vl) { - return vle16_v_u16mf2_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16mf2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m1_tumu( @@ -463,7 +463,7 @@ vuint16mf2_t test_vle16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vle16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m1_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16m1_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m2_tumu( @@ -472,7 +472,7 @@ vuint16m1_t test_vle16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vle16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m2_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16m2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m4_tumu( @@ -481,7 +481,7 @@ vuint16m2_t test_vle16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vle16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m4_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16m4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m8_tumu( @@ -490,7 +490,7 @@ vuint16m4_t test_vle16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vle16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m8_tumu(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16m8_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16mf4_mu( @@ -499,7 +499,7 @@ vuint16m8_t test_vle16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t 
test_vle16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16mf4_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16mf4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16mf2_mu( @@ -508,7 +508,7 @@ vfloat16mf4_t test_vle16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vle16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16mf2_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16mf2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m1_mu( @@ -517,7 +517,7 @@ vfloat16mf2_t test_vle16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vle16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m1_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16m1_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m2_mu( @@ -526,7 +526,7 @@ vfloat16m1_t test_vle16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vle16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m2_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16m2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m4_mu( @@ -535,7 +535,7 @@ vfloat16m2_t test_vle16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vle16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m4_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16m4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_f16m8_mu( @@ -544,7 +544,7 @@ vfloat16m4_t test_vle16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t 
maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vle16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) { - return vle16_v_f16m8_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_f16m8_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf4_mu( @@ -553,7 +553,7 @@ vfloat16m8_t test_vle16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vle16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16mf4_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16mf4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16mf2_mu( @@ -562,7 +562,7 @@ vint16mf4_t test_vle16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vle16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16mf2_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16mf2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m1_mu( @@ -571,7 +571,7 @@ vint16mf2_t test_vle16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vle16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m1_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16m1_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m2_mu( @@ -580,7 +580,7 @@ vint16m1_t test_vle16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vle16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m2_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16m2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m4_mu( @@ -589,7 +589,7 @@ vint16m2_t 
test_vle16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vle16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m4_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16m4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_i16m8_mu( @@ -598,7 +598,7 @@ vint16m4_t test_vle16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vle16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { - return vle16_v_i16m8_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_i16m8_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf4_mu( @@ -607,7 +607,7 @@ vint16m8_t test_vle16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vle16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16mf4_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16mf4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16mf2_mu( @@ -616,7 +616,7 @@ vuint16mf4_t test_vle16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vle16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16mf2_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16mf2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m1_mu( @@ -625,7 +625,7 @@ vuint16mf2_t test_vle16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vle16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m1_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16m1_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: 
@test_vle16_v_u16m2_mu( @@ -634,7 +634,7 @@ vuint16m1_t test_vle16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vle16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m2_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16m2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m4_mu( @@ -643,7 +643,7 @@ vuint16m2_t test_vle16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vle16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m4_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16m4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle16_v_u16m8_mu( @@ -652,6 +652,6 @@ vuint16m4_t test_vle16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vle16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { - return vle16_v_u16m8_mu(mask, maskedoff, base, vl); + return __riscv_vle16_v_u16m8_mu(mask, maskedoff, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle16ff.c index 7838873bf8a8..2908a3dabfe5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle16ff.c @@ -16,7 +16,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vle16ff_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16mf4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_tu( @@ -28,7 +28,7 @@ vfloat16mf4_t 
test_vle16ff_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 * // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vle16ff_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16mf2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vle16ff_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 * // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vle16ff_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m1_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m1_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_tu( @@ -52,7 +52,7 @@ vfloat16m1_t test_vle16ff_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *bas // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vle16ff_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_tu( @@ -64,7 +64,7 @@ vfloat16m2_t test_vle16ff_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *bas // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vle16ff_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_tu( @@ -76,7 +76,7 @@ vfloat16m4_t test_vle16ff_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *bas // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vle16ff_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m8_tu(maskedoff, base, new_vl, vl); + return 
__riscv_vle16ff_v_f16m8_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_tu( @@ -88,7 +88,7 @@ vfloat16m8_t test_vle16ff_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *bas // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vle16ff_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16mf4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_tu( @@ -100,7 +100,7 @@ vint16mf4_t test_vle16ff_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vle16ff_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16mf2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_tu( @@ -112,7 +112,7 @@ vint16mf2_t test_vle16ff_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vle16ff_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m1_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m1_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_tu( @@ -124,7 +124,7 @@ vint16m1_t test_vle16ff_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, si // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vle16ff_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_tu( @@ -136,7 +136,7 @@ vint16m2_t test_vle16ff_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, si // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vle16ff_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, 
size_t *new_vl, size_t vl) { - return vle16ff_v_i16m4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_tu( @@ -148,7 +148,7 @@ vint16m4_t test_vle16ff_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, si // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vle16ff_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m8_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m8_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_tu( @@ -160,7 +160,7 @@ vint16m8_t test_vle16ff_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, si // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vle16ff_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16mf4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_tu( @@ -172,7 +172,7 @@ vuint16mf4_t test_vle16ff_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *ba // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vle16ff_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16mf2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_tu( @@ -184,7 +184,7 @@ vuint16mf2_t test_vle16ff_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *ba // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vle16ff_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m1_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m1_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_tu( @@ -196,7 +196,7 @@ vuint16m1_t test_vle16ff_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, // 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vle16ff_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_tu( @@ -208,7 +208,7 @@ vuint16m2_t test_vle16ff_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vle16ff_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_tu( @@ -220,7 +220,7 @@ vuint16m4_t test_vle16ff_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vle16ff_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m8_tu(maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m8_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4_tum( @@ -232,7 +232,7 @@ vuint16m8_t test_vle16ff_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vle16ff_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16mf4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_tum( @@ -244,7 +244,7 @@ vfloat16mf4_t test_vle16ff_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vle16ff_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16mf2_tum(mask, 
maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_tum( @@ -256,7 +256,7 @@ vfloat16mf2_t test_vle16ff_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vle16ff_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m1_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m1_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_tum( @@ -268,7 +268,7 @@ vfloat16m1_t test_vle16ff_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vle16ff_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_tum( @@ -280,7 +280,7 @@ vfloat16m2_t test_vle16ff_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vle16ff_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_tum( @@ -292,7 +292,7 @@ vfloat16m4_t test_vle16ff_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vle16ff_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m8_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m8_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_tum( @@ -304,7 +304,7 @@ vfloat16m8_t test_vle16ff_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, con // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vle16ff_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16mf4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_tum( @@ -316,7 +316,7 @@ vint16mf4_t test_vle16ff_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vle16ff_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16mf2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_tum( @@ -328,7 +328,7 @@ vint16mf2_t test_vle16ff_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vle16ff_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m1_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m1_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_tum( @@ -340,7 +340,7 @@ vint16m1_t test_vle16ff_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vle16ff_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_tum( @@ -352,7 +352,7 @@ vint16m2_t test_vle16ff_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vle16ff_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m4_tum(mask, 
maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_tum( @@ -364,7 +364,7 @@ vint16m4_t test_vle16ff_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vle16ff_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m8_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m8_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_tum( @@ -376,7 +376,7 @@ vint16m8_t test_vle16ff_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vle16ff_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16mf4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_tum( @@ -388,7 +388,7 @@ vuint16mf4_t test_vle16ff_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vle16ff_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16mf2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_tum( @@ -400,7 +400,7 @@ vuint16mf2_t test_vle16ff_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vle16ff_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m1_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m1_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_tum( @@ -412,7 +412,7 @@ 
vuint16m1_t test_vle16ff_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vle16ff_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_tum( @@ -424,7 +424,7 @@ vuint16m2_t test_vle16ff_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vle16ff_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_tum( @@ -436,7 +436,7 @@ vuint16m4_t test_vle16ff_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vle16ff_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m8_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m8_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4_tumu( @@ -448,7 +448,7 @@ vuint16m8_t test_vle16ff_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vle16ff_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16mf4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_tumu( @@ -460,7 +460,7 @@ vfloat16mf4_t test_vle16ff_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vle16ff_v_f16mf2_tumu(vbool32_t mask, 
vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16mf2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_tumu( @@ -472,7 +472,7 @@ vfloat16mf2_t test_vle16ff_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vle16ff_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m1_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m1_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_tumu( @@ -484,7 +484,7 @@ vfloat16m1_t test_vle16ff_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vle16ff_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_tumu( @@ -496,7 +496,7 @@ vfloat16m2_t test_vle16ff_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vle16ff_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_tumu( @@ -508,7 +508,7 @@ vfloat16m4_t test_vle16ff_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vle16ff_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m8_tumu(mask, maskedoff, base, new_vl, vl); + return 
__riscv_vle16ff_v_f16m8_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_tumu( @@ -520,7 +520,7 @@ vfloat16m8_t test_vle16ff_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vle16ff_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16mf4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_tumu( @@ -532,7 +532,7 @@ vint16mf4_t test_vle16ff_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vle16ff_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16mf2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_tumu( @@ -544,7 +544,7 @@ vint16mf2_t test_vle16ff_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vle16ff_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m1_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m1_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_tumu( @@ -556,7 +556,7 @@ vint16m1_t test_vle16ff_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vle16ff_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_tumu( @@ -568,7 +568,7 @@ vint16m2_t 
test_vle16ff_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vle16ff_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_tumu( @@ -580,7 +580,7 @@ vint16m4_t test_vle16ff_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vle16ff_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m8_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m8_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_tumu( @@ -592,7 +592,7 @@ vint16m8_t test_vle16ff_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vle16ff_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16mf4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_tumu( @@ -604,7 +604,7 @@ vuint16mf4_t test_vle16ff_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vle16ff_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16mf2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_tumu( @@ -616,7 +616,7 @@ vuint16mf2_t test_vle16ff_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vle16ff_v_u16m1_tumu(vbool16_t mask, vuint16m1_t 
maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m1_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m1_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_tumu( @@ -628,7 +628,7 @@ vuint16m1_t test_vle16ff_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vle16ff_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_tumu( @@ -640,7 +640,7 @@ vuint16m2_t test_vle16ff_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vle16ff_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_tumu( @@ -652,7 +652,7 @@ vuint16m4_t test_vle16ff_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vle16ff_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m8_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m8_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4_mu( @@ -664,7 +664,7 @@ vuint16m8_t test_vle16ff_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vle16ff_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16mf4_mu(mask, 
maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_mu( @@ -676,7 +676,7 @@ vfloat16mf4_t test_vle16ff_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vle16ff_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16mf2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_mu( @@ -688,7 +688,7 @@ vfloat16mf2_t test_vle16ff_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vle16ff_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m1_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m1_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_mu( @@ -700,7 +700,7 @@ vfloat16m1_t test_vle16ff_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vle16ff_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_mu( @@ -712,7 +712,7 @@ vfloat16m2_t test_vle16ff_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vle16ff_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_mu( @@ -724,7 +724,7 @@ vfloat16m4_t test_vle16ff_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, cons // 
CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vle16ff_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16m8_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_f16m8_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_mu( @@ -736,7 +736,7 @@ vfloat16m8_t test_vle16ff_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vle16ff_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16mf4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_mu( @@ -748,7 +748,7 @@ vint16mf4_t test_vle16ff_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vle16ff_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16mf2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16mf2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_mu( @@ -760,7 +760,7 @@ vint16mf2_t test_vle16ff_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vle16ff_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m1_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m1_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_mu( @@ -772,7 +772,7 @@ vint16m1_t test_vle16ff_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vle16ff_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m2_mu(mask, maskedoff, 
base, new_vl, vl); + return __riscv_vle16ff_v_i16m2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_mu( @@ -784,7 +784,7 @@ vint16m2_t test_vle16ff_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vle16ff_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_mu( @@ -796,7 +796,7 @@ vint16m4_t test_vle16ff_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vle16ff_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_i16m8_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_i16m8_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_mu( @@ -808,7 +808,7 @@ vint16m8_t test_vle16ff_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vle16ff_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16mf4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_mu( @@ -820,7 +820,7 @@ vuint16mf4_t test_vle16ff_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vle16ff_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16mf2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16mf2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_mu( @@ -832,7 +832,7 @@ vuint16mf2_t 
test_vle16ff_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vle16ff_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m1_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m1_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_mu( @@ -844,7 +844,7 @@ vuint16m1_t test_vle16ff_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vle16ff_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_mu( @@ -856,7 +856,7 @@ vuint16m2_t test_vle16ff_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vle16ff_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_mu( @@ -868,6 +868,6 @@ vuint16m4_t test_vle16ff_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vle16ff_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return vle16ff_v_u16m8_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle16ff_v_u16m8_mu(mask, maskedoff, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle32.c index d7a96aa20808..35ca4d85c5a7 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vle32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32mf2_tu(maskedoff, base, vl); + return __riscv_vle32_v_f32mf2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m1_tu( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vle32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vle32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m1_tu(maskedoff, base, vl); + return __riscv_vle32_v_f32m1_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m2_tu( @@ -31,7 +31,7 @@ vfloat32m1_t test_vle32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vle32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m2_tu(maskedoff, base, vl); + return __riscv_vle32_v_f32m2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m4_tu( @@ -40,7 +40,7 @@ vfloat32m2_t test_vle32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vle32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m4_tu(maskedoff, base, vl); + return __riscv_vle32_v_f32m4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m8_tu( @@ -49,7 +49,7 @@ vfloat32m4_t test_vle32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vle32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m8_tu(maskedoff, base, vl); + return __riscv_vle32_v_f32m8_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32mf2_tu( @@ -58,7 
+58,7 @@ vfloat32m8_t test_vle32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vle32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32mf2_tu(maskedoff, base, vl); + return __riscv_vle32_v_i32mf2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m1_tu( @@ -67,7 +67,7 @@ vint32mf2_t test_vle32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vle32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m1_tu(maskedoff, base, vl); + return __riscv_vle32_v_i32m1_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m2_tu( @@ -76,7 +76,7 @@ vint32m1_t test_vle32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vle32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m2_tu(maskedoff, base, vl); + return __riscv_vle32_v_i32m2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m4_tu( @@ -85,7 +85,7 @@ vint32m2_t test_vle32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vle32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m4_tu(maskedoff, base, vl); + return __riscv_vle32_v_i32m4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m8_tu( @@ -94,7 +94,7 @@ vint32m4_t test_vle32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vle32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m8_tu(maskedoff, base, vl); + return __riscv_vle32_v_i32m8_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32mf2_tu( @@ -103,7 +103,7 @@ vint32m8_t test_vle32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, size // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32mf2_t test_vle32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32mf2_tu(maskedoff, base, vl); + return __riscv_vle32_v_u32mf2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m1_tu( @@ -112,7 +112,7 @@ vuint32mf2_t test_vle32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vle32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m1_tu(maskedoff, base, vl); + return __riscv_vle32_v_u32m1_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m2_tu( @@ -121,7 +121,7 @@ vuint32m1_t test_vle32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vle32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m2_tu(maskedoff, base, vl); + return __riscv_vle32_v_u32m2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m4_tu( @@ -130,7 +130,7 @@ vuint32m2_t test_vle32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vle32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m4_tu(maskedoff, base, vl); + return __riscv_vle32_v_u32m4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m8_tu( @@ -139,7 +139,7 @@ vuint32m4_t test_vle32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vle32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m8_tu(maskedoff, base, vl); + return __riscv_vle32_v_u32m8_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32mf2_tum( @@ -148,7 +148,7 @@ vuint32m8_t test_vle32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vle32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, 
const float *base, size_t vl) { - return vle32_v_f32mf2_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32mf2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m1_tum( @@ -157,7 +157,7 @@ vfloat32mf2_t test_vle32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vle32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m1_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32m1_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m2_tum( @@ -166,7 +166,7 @@ vfloat32m1_t test_vle32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vle32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m2_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32m2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m4_tum( @@ -175,7 +175,7 @@ vfloat32m2_t test_vle32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vle32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m4_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32m4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m8_tum( @@ -184,7 +184,7 @@ vfloat32m4_t test_vle32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vle32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m8_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32m8_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32mf2_tum( @@ -193,7 +193,7 @@ vfloat32m8_t test_vle32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vle32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32mf2_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32mf2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m1_tum( @@ -202,7 +202,7 @@ vint32mf2_t test_vle32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vle32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m1_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32m1_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m2_tum( @@ -211,7 +211,7 @@ vint32m1_t test_vle32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vle32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m2_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32m2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m4_tum( @@ -220,7 +220,7 @@ vint32m2_t test_vle32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vle32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m4_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32m4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m8_tum( @@ -229,7 +229,7 @@ vint32m4_t test_vle32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vle32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m8_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32m8_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32mf2_tum( @@ -238,7 +238,7 @@ vint32m8_t test_vle32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const 
int // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vle32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32mf2_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32mf2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m1_tum( @@ -247,7 +247,7 @@ vuint32mf2_t test_vle32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vle32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m1_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32m1_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m2_tum( @@ -256,7 +256,7 @@ vuint32m1_t test_vle32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vle32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m2_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32m2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m4_tum( @@ -265,7 +265,7 @@ vuint32m2_t test_vle32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vle32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m4_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32m4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m8_tum( @@ -274,7 +274,7 @@ vuint32m4_t test_vle32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vle32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m8_tum(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32m8_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32mf2_tumu( @@ -283,7 +283,7 @@ vuint32m8_t 
test_vle32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vle32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32mf2_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32mf2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m1_tumu( @@ -292,7 +292,7 @@ vfloat32mf2_t test_vle32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vle32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m1_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32m1_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m2_tumu( @@ -301,7 +301,7 @@ vfloat32m1_t test_vle32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vle32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m2_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32m2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m4_tumu( @@ -310,7 +310,7 @@ vfloat32m2_t test_vle32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vle32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m4_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32m4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m8_tumu( @@ -319,7 +319,7 @@ vfloat32m4_t test_vle32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vle32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m8_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32m8_tumu(mask, maskedoff, base, vl); } 
// CHECK-RV64-LABEL: @test_vle32_v_i32mf2_tumu( @@ -328,7 +328,7 @@ vfloat32m8_t test_vle32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vle32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32mf2_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32mf2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m1_tumu( @@ -337,7 +337,7 @@ vint32mf2_t test_vle32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vle32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m1_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32m1_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m2_tumu( @@ -346,7 +346,7 @@ vint32m1_t test_vle32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vle32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m2_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32m2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m4_tumu( @@ -355,7 +355,7 @@ vint32m2_t test_vle32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vle32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m4_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32m4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m8_tumu( @@ -364,7 +364,7 @@ vint32m4_t test_vle32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vle32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m8_tumu(mask, maskedoff, base, 
vl); + return __riscv_vle32_v_i32m8_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32mf2_tumu( @@ -373,7 +373,7 @@ vint32m8_t test_vle32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vle32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32mf2_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32mf2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m1_tumu( @@ -382,7 +382,7 @@ vuint32mf2_t test_vle32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vle32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m1_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32m1_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m2_tumu( @@ -391,7 +391,7 @@ vuint32m1_t test_vle32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vle32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m2_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32m2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m4_tumu( @@ -400,7 +400,7 @@ vuint32m2_t test_vle32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vle32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m4_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32m4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m8_tumu( @@ -409,7 +409,7 @@ vuint32m4_t test_vle32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vle32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, 
const uint32_t *base, size_t vl) { - return vle32_v_u32m8_tumu(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32m8_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32mf2_mu( @@ -418,7 +418,7 @@ vuint32m8_t test_vle32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vle32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32mf2_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32mf2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m1_mu( @@ -427,7 +427,7 @@ vfloat32mf2_t test_vle32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vle32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m1_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32m1_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m2_mu( @@ -436,7 +436,7 @@ vfloat32m1_t test_vle32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vle32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m2_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32m2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m4_mu( @@ -445,7 +445,7 @@ vfloat32m2_t test_vle32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vle32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m4_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32m4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_f32m8_mu( @@ -454,7 +454,7 @@ vfloat32m4_t test_vle32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t 
test_vle32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { - return vle32_v_f32m8_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_f32m8_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32mf2_mu( @@ -463,7 +463,7 @@ vfloat32m8_t test_vle32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vle32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32mf2_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32mf2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m1_mu( @@ -472,7 +472,7 @@ vint32mf2_t test_vle32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vle32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m1_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32m1_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m2_mu( @@ -481,7 +481,7 @@ vint32m1_t test_vle32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vle32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m2_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32m2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m4_mu( @@ -490,7 +490,7 @@ vint32m2_t test_vle32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vle32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m4_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32m4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_i32m8_mu( @@ -499,7 +499,7 @@ vint32m4_t test_vle32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int3 // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m8_t test_vle32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { - return vle32_v_i32m8_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_i32m8_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32mf2_mu( @@ -508,7 +508,7 @@ vint32m8_t test_vle32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vle32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32mf2_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32mf2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m1_mu( @@ -517,7 +517,7 @@ vuint32mf2_t test_vle32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vle32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m1_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32m1_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m2_mu( @@ -526,7 +526,7 @@ vuint32m1_t test_vle32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vle32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m2_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32m2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m4_mu( @@ -535,7 +535,7 @@ vuint32m2_t test_vle32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vle32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m4_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32m4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle32_v_u32m8_mu( @@ -544,6 +544,6 @@ vuint32m4_t test_vle32_v_u32m4_mu(vbool8_t mask, 
vuint32m4_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vle32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { - return vle32_v_u32m8_mu(mask, maskedoff, base, vl); + return __riscv_vle32_v_u32m8_mu(mask, maskedoff, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle32ff.c index 16f670e105d1..94ec11f06c94 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle32ff.c @@ -16,7 +16,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vle32ff_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32mf2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32mf2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_tu( @@ -28,7 +28,7 @@ vfloat32mf2_t test_vle32ff_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *bas // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vle32ff_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m1_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m1_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_tu( @@ -40,7 +40,7 @@ vfloat32m1_t test_vle32ff_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vle32ff_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_tu( @@ -52,7 +52,7 @@ vfloat32m2_t test_vle32ff_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret 
[[TMP1]] // vfloat32m4_t test_vle32ff_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_tu( @@ -64,7 +64,7 @@ vfloat32m4_t test_vle32ff_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vle32ff_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m8_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m8_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_tu( @@ -76,7 +76,7 @@ vfloat32m8_t test_vle32ff_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vle32ff_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32mf2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32mf2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_tu( @@ -88,7 +88,7 @@ vint32mf2_t test_vle32ff_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vle32ff_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m1_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m1_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_tu( @@ -100,7 +100,7 @@ vint32m1_t test_vle32ff_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, si // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vle32ff_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_tu( @@ -112,7 +112,7 @@ vint32m2_t 
test_vle32ff_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, si // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vle32ff_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_tu( @@ -124,7 +124,7 @@ vint32m4_t test_vle32ff_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, si // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vle32ff_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m8_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m8_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_tu( @@ -136,7 +136,7 @@ vint32m8_t test_vle32ff_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, si // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vle32ff_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32mf2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32mf2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_tu( @@ -148,7 +148,7 @@ vuint32mf2_t test_vle32ff_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *ba // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vle32ff_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m1_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m1_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_tu( @@ -160,7 +160,7 @@ vuint32m1_t test_vle32ff_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vle32ff_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m2_tu(maskedoff, base, new_vl, vl); + return 
__riscv_vle32ff_v_u32m2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_tu( @@ -172,7 +172,7 @@ vuint32m2_t test_vle32ff_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vle32ff_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_tu( @@ -184,7 +184,7 @@ vuint32m4_t test_vle32ff_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vle32ff_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m8_tu(maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m8_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_tum( @@ -196,7 +196,7 @@ vuint32m8_t test_vle32ff_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vle32ff_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32mf2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32mf2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_tum( @@ -208,7 +208,7 @@ vfloat32mf2_t test_vle32ff_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vle32ff_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m1_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m1_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_tum( @@ -220,7 +220,7 @@ vfloat32m1_t test_vle32ff_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // 
vfloat32m2_t test_vle32ff_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_tum( @@ -232,7 +232,7 @@ vfloat32m2_t test_vle32ff_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vle32ff_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_tum( @@ -244,7 +244,7 @@ vfloat32m4_t test_vle32ff_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vle32ff_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m8_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m8_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_tum( @@ -256,7 +256,7 @@ vfloat32m8_t test_vle32ff_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vle32ff_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32mf2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32mf2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_tum( @@ -268,7 +268,7 @@ vint32mf2_t test_vle32ff_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vle32ff_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m1_tum(mask, maskedoff, base, new_vl, vl); + 
return __riscv_vle32ff_v_i32m1_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_tum( @@ -280,7 +280,7 @@ vint32m1_t test_vle32ff_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vle32ff_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_tum( @@ -292,7 +292,7 @@ vint32m2_t test_vle32ff_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vle32ff_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_tum( @@ -304,7 +304,7 @@ vint32m4_t test_vle32ff_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vle32ff_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m8_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m8_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_tum( @@ -316,7 +316,7 @@ vint32m8_t test_vle32ff_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vle32ff_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32mf2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32mf2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_tum( @@ -328,7 +328,7 @@ vuint32mf2_t test_vle32ff_v_u32mf2_tum(vbool64_t mask, 
vuint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vle32ff_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m1_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m1_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_tum( @@ -340,7 +340,7 @@ vuint32m1_t test_vle32ff_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vle32ff_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_tum( @@ -352,7 +352,7 @@ vuint32m2_t test_vle32ff_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vle32ff_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_tum( @@ -364,7 +364,7 @@ vuint32m4_t test_vle32ff_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vle32ff_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m8_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m8_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_tumu( @@ -376,7 +376,7 @@ vuint32m8_t test_vle32ff_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vle32ff_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { 
- return vle32ff_v_f32mf2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32mf2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_tumu( @@ -388,7 +388,7 @@ vfloat32mf2_t test_vle32ff_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vle32ff_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m1_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m1_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_tumu( @@ -400,7 +400,7 @@ vfloat32m1_t test_vle32ff_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vle32ff_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_tumu( @@ -412,7 +412,7 @@ vfloat32m2_t test_vle32ff_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vle32ff_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_tumu( @@ -424,7 +424,7 @@ vfloat32m4_t test_vle32ff_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vle32ff_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m8_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m8_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vle32ff_v_i32mf2_tumu( @@ -436,7 +436,7 @@ vfloat32m8_t test_vle32ff_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vle32ff_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32mf2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32mf2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_tumu( @@ -448,7 +448,7 @@ vint32mf2_t test_vle32ff_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vle32ff_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m1_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m1_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_tumu( @@ -460,7 +460,7 @@ vint32m1_t test_vle32ff_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vle32ff_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_tumu( @@ -472,7 +472,7 @@ vint32m2_t test_vle32ff_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vle32ff_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_tumu( @@ -484,7 +484,7 @@ vint32m4_t test_vle32ff_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t 
test_vle32ff_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m8_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m8_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_tumu( @@ -496,7 +496,7 @@ vint32m8_t test_vle32ff_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vle32ff_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32mf2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32mf2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_tumu( @@ -508,7 +508,7 @@ vuint32mf2_t test_vle32ff_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vle32ff_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m1_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m1_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_tumu( @@ -520,7 +520,7 @@ vuint32m1_t test_vle32ff_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vle32ff_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_tumu( @@ -532,7 +532,7 @@ vuint32m2_t test_vle32ff_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vle32ff_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m4_tumu(mask, maskedoff, base, 
new_vl, vl); + return __riscv_vle32ff_v_u32m4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_tumu( @@ -544,7 +544,7 @@ vuint32m4_t test_vle32ff_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vle32ff_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m8_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m8_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_mu( @@ -556,7 +556,7 @@ vuint32m8_t test_vle32ff_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vle32ff_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32mf2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32mf2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_mu( @@ -568,7 +568,7 @@ vfloat32mf2_t test_vle32ff_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vle32ff_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m1_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m1_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_mu( @@ -580,7 +580,7 @@ vfloat32m1_t test_vle32ff_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vle32ff_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_mu( @@ -592,7 +592,7 @@ vfloat32m2_t 
test_vle32ff_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vle32ff_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_mu( @@ -604,7 +604,7 @@ vfloat32m4_t test_vle32ff_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vle32ff_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m8_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_f32m8_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_mu( @@ -616,7 +616,7 @@ vfloat32m8_t test_vle32ff_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vle32ff_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32mf2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32mf2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_mu( @@ -628,7 +628,7 @@ vint32mf2_t test_vle32ff_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vle32ff_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m1_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m1_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_mu( @@ -640,7 +640,7 @@ vint32m1_t test_vle32ff_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vle32ff_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t 
*new_vl, size_t vl) { - return vle32ff_v_i32m2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_mu( @@ -652,7 +652,7 @@ vint32m2_t test_vle32ff_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vle32ff_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_mu( @@ -664,7 +664,7 @@ vint32m4_t test_vle32ff_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vle32ff_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m8_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_i32m8_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_mu( @@ -676,7 +676,7 @@ vint32m8_t test_vle32ff_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vle32ff_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32mf2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32mf2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_mu( @@ -688,7 +688,7 @@ vuint32mf2_t test_vle32ff_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vle32ff_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m1_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m1_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_mu( @@ 
-700,7 +700,7 @@ vuint32m1_t test_vle32ff_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vle32ff_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_mu( @@ -712,7 +712,7 @@ vuint32m2_t test_vle32ff_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vle32ff_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_mu( @@ -724,6 +724,6 @@ vuint32m4_t test_vle32ff_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vle32ff_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m8_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle32ff_v_u32m8_mu(mask, maskedoff, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle64.c index 3796da074e63..53ca08b1b850 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vle64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m1_tu(maskedoff, base, vl); + return __riscv_vle64_v_f64m1_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m2_tu( @@ -22,7 
+22,7 @@ vfloat64m1_t test_vle64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vle64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m2_tu(maskedoff, base, vl); + return __riscv_vle64_v_f64m2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m4_tu( @@ -31,7 +31,7 @@ vfloat64m2_t test_vle64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vle64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m4_tu(maskedoff, base, vl); + return __riscv_vle64_v_f64m4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m8_tu( @@ -40,7 +40,7 @@ vfloat64m4_t test_vle64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vle64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m8_tu(maskedoff, base, vl); + return __riscv_vle64_v_f64m8_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m1_tu( @@ -49,7 +49,7 @@ vfloat64m8_t test_vle64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vle64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m1_tu(maskedoff, base, vl); + return __riscv_vle64_v_i64m1_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m2_tu( @@ -58,7 +58,7 @@ vint64m1_t test_vle64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vle64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m2_tu(maskedoff, base, vl); + return __riscv_vle64_v_i64m2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m4_tu( @@ -67,7 +67,7 @@ vint64m2_t test_vle64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, size // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m4_t test_vle64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m4_tu(maskedoff, base, vl); + return __riscv_vle64_v_i64m4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m8_tu( @@ -76,7 +76,7 @@ vint64m4_t test_vle64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vle64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m8_tu(maskedoff, base, vl); + return __riscv_vle64_v_i64m8_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m1_tu( @@ -85,7 +85,7 @@ vint64m8_t test_vle64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vle64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m1_tu(maskedoff, base, vl); + return __riscv_vle64_v_u64m1_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m2_tu( @@ -94,7 +94,7 @@ vuint64m1_t test_vle64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vle64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m2_tu(maskedoff, base, vl); + return __riscv_vle64_v_u64m2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m4_tu( @@ -103,7 +103,7 @@ vuint64m2_t test_vle64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vle64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m4_tu(maskedoff, base, vl); + return __riscv_vle64_v_u64m4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m8_tu( @@ -112,7 +112,7 @@ vuint64m4_t test_vle64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vle64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { - 
return vle64_v_u64m8_tu(maskedoff, base, vl); + return __riscv_vle64_v_u64m8_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m1_tum( @@ -121,7 +121,7 @@ vuint64m8_t test_vle64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, s // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vle64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m1_tum(mask, maskedoff, base, vl); + return __riscv_vle64_v_f64m1_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m2_tum( @@ -130,7 +130,7 @@ vfloat64m1_t test_vle64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vle64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m2_tum(mask, maskedoff, base, vl); + return __riscv_vle64_v_f64m2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m4_tum( @@ -139,7 +139,7 @@ vfloat64m2_t test_vle64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vle64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m4_tum(mask, maskedoff, base, vl); + return __riscv_vle64_v_f64m4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m8_tum( @@ -148,7 +148,7 @@ vfloat64m4_t test_vle64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vle64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m8_tum(mask, maskedoff, base, vl); + return __riscv_vle64_v_f64m8_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m1_tum( @@ -157,7 +157,7 @@ vfloat64m8_t test_vle64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vle64_v_i64m1_tum(vbool64_t mask, vint64m1_t 
maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m1_tum(mask, maskedoff, base, vl); + return __riscv_vle64_v_i64m1_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m2_tum( @@ -166,7 +166,7 @@ vint64m1_t test_vle64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vle64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m2_tum(mask, maskedoff, base, vl); + return __riscv_vle64_v_i64m2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m4_tum( @@ -175,7 +175,7 @@ vint64m2_t test_vle64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vle64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m4_tum(mask, maskedoff, base, vl); + return __riscv_vle64_v_i64m4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m8_tum( @@ -184,7 +184,7 @@ vint64m4_t test_vle64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vle64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m8_tum(mask, maskedoff, base, vl); + return __riscv_vle64_v_i64m8_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m1_tum( @@ -193,7 +193,7 @@ vint64m8_t test_vle64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vle64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m1_tum(mask, maskedoff, base, vl); + return __riscv_vle64_v_u64m1_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m2_tum( @@ -202,7 +202,7 @@ vuint64m1_t test_vle64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vle64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m2_tum(mask, maskedoff, base, vl); + return __riscv_vle64_v_u64m2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m4_tum( @@ -211,7 +211,7 @@ vuint64m2_t test_vle64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vle64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m4_tum(mask, maskedoff, base, vl); + return __riscv_vle64_v_u64m4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m8_tum( @@ -220,7 +220,7 @@ vuint64m4_t test_vle64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vle64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m8_tum(mask, maskedoff, base, vl); + return __riscv_vle64_v_u64m8_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m1_tumu( @@ -229,7 +229,7 @@ vuint64m8_t test_vle64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vle64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m1_tumu(mask, maskedoff, base, vl); + return __riscv_vle64_v_f64m1_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m2_tumu( @@ -238,7 +238,7 @@ vfloat64m1_t test_vle64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vle64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m2_tumu(mask, maskedoff, base, vl); + return __riscv_vle64_v_f64m2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m4_tumu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vle64_v_f64m2_tumu(vbool32_t mask, 
vfloat64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vle64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m4_tumu(mask, maskedoff, base, vl); + return __riscv_vle64_v_f64m4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m8_tumu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vle64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vle64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m8_tumu(mask, maskedoff, base, vl); + return __riscv_vle64_v_f64m8_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m1_tumu( @@ -265,7 +265,7 @@ vfloat64m8_t test_vle64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vle64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m1_tumu(mask, maskedoff, base, vl); + return __riscv_vle64_v_i64m1_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m2_tumu( @@ -274,7 +274,7 @@ vint64m1_t test_vle64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vle64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m2_tumu(mask, maskedoff, base, vl); + return __riscv_vle64_v_i64m2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m4_tumu( @@ -283,7 +283,7 @@ vint64m2_t test_vle64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vle64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m4_tumu(mask, maskedoff, base, vl); + return __riscv_vle64_v_i64m4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m8_tumu( @@ 
-292,7 +292,7 @@ vint64m4_t test_vle64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vle64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m8_tumu(mask, maskedoff, base, vl); + return __riscv_vle64_v_i64m8_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m1_tumu( @@ -301,7 +301,7 @@ vint64m8_t test_vle64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vle64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m1_tumu(mask, maskedoff, base, vl); + return __riscv_vle64_v_u64m1_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m2_tumu( @@ -310,7 +310,7 @@ vuint64m1_t test_vle64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vle64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m2_tumu(mask, maskedoff, base, vl); + return __riscv_vle64_v_u64m2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m4_tumu( @@ -319,7 +319,7 @@ vuint64m2_t test_vle64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vle64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m4_tumu(mask, maskedoff, base, vl); + return __riscv_vle64_v_u64m4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m8_tumu( @@ -328,7 +328,7 @@ vuint64m4_t test_vle64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vle64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m8_tumu(mask, maskedoff, base, vl); + return 
__riscv_vle64_v_u64m8_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m1_mu( @@ -337,7 +337,7 @@ vuint64m8_t test_vle64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vle64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m1_mu(mask, maskedoff, base, vl); + return __riscv_vle64_v_f64m1_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m2_mu( @@ -346,7 +346,7 @@ vfloat64m1_t test_vle64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vle64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m2_mu(mask, maskedoff, base, vl); + return __riscv_vle64_v_f64m2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m4_mu( @@ -355,7 +355,7 @@ vfloat64m2_t test_vle64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vle64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m4_mu(mask, maskedoff, base, vl); + return __riscv_vle64_v_f64m4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_f64m8_mu( @@ -364,7 +364,7 @@ vfloat64m4_t test_vle64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vle64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { - return vle64_v_f64m8_mu(mask, maskedoff, base, vl); + return __riscv_vle64_v_f64m8_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m1_mu( @@ -373,7 +373,7 @@ vfloat64m8_t test_vle64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vle64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { - return 
vle64_v_i64m1_mu(mask, maskedoff, base, vl); + return __riscv_vle64_v_i64m1_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m2_mu( @@ -382,7 +382,7 @@ vint64m1_t test_vle64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vle64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m2_mu(mask, maskedoff, base, vl); + return __riscv_vle64_v_i64m2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m4_mu( @@ -391,7 +391,7 @@ vint64m2_t test_vle64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vle64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m4_mu(mask, maskedoff, base, vl); + return __riscv_vle64_v_i64m4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_i64m8_mu( @@ -400,7 +400,7 @@ vint64m4_t test_vle64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vle64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { - return vle64_v_i64m8_mu(mask, maskedoff, base, vl); + return __riscv_vle64_v_i64m8_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m1_mu( @@ -409,7 +409,7 @@ vint64m8_t test_vle64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vle64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m1_mu(mask, maskedoff, base, vl); + return __riscv_vle64_v_u64m1_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m2_mu( @@ -418,7 +418,7 @@ vuint64m1_t test_vle64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vle64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const 
uint64_t *base, size_t vl) { - return vle64_v_u64m2_mu(mask, maskedoff, base, vl); + return __riscv_vle64_v_u64m2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m4_mu( @@ -427,7 +427,7 @@ vuint64m2_t test_vle64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vle64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m4_mu(mask, maskedoff, base, vl); + return __riscv_vle64_v_u64m4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle64_v_u64m8_mu( @@ -436,6 +436,6 @@ vuint64m4_t test_vle64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vle64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { - return vle64_v_u64m8_mu(mask, maskedoff, base, vl); + return __riscv_vle64_v_u64m8_mu(mask, maskedoff, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle64ff.c index 573107deabae..c080d01ea112 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle64ff.c @@ -16,7 +16,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vle64ff_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m1_tu(maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_f64m1_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_tu( @@ -28,7 +28,7 @@ vfloat64m1_t test_vle64ff_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vle64ff_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m2_tu(maskedoff, base, 
new_vl, vl); + return __riscv_vle64ff_v_f64m2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_tu( @@ -40,7 +40,7 @@ vfloat64m2_t test_vle64ff_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vle64ff_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_f64m4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_tu( @@ -52,7 +52,7 @@ vfloat64m4_t test_vle64ff_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vle64ff_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m8_tu(maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_f64m8_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_tu( @@ -64,7 +64,7 @@ vfloat64m8_t test_vle64ff_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vle64ff_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m1_tu(maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m1_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_tu( @@ -76,7 +76,7 @@ vint64m1_t test_vle64ff_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, si // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vle64ff_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_tu( @@ -88,7 +88,7 @@ vint64m2_t test_vle64ff_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, si // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vle64ff_v_i64m4_tu(vint64m4_t maskedoff, const int64_t 
*base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_tu( @@ -100,7 +100,7 @@ vint64m4_t test_vle64ff_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, si // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vle64ff_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m8_tu(maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m8_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_tu( @@ -112,7 +112,7 @@ vint64m8_t test_vle64ff_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, si // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vle64ff_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m1_tu(maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m1_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_tu( @@ -124,7 +124,7 @@ vuint64m1_t test_vle64ff_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vle64ff_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_tu( @@ -136,7 +136,7 @@ vuint64m2_t test_vle64ff_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vle64ff_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_tu( @@ -148,7 +148,7 @@ vuint64m4_t test_vle64ff_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, // 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vle64ff_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m8_tu(maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m8_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_tum( @@ -160,7 +160,7 @@ vuint64m8_t test_vle64ff_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vle64ff_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m1_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_f64m1_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_tum( @@ -172,7 +172,7 @@ vfloat64m1_t test_vle64ff_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vle64ff_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_f64m2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_tum( @@ -184,7 +184,7 @@ vfloat64m2_t test_vle64ff_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vle64ff_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_f64m4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_tum( @@ -196,7 +196,7 @@ vfloat64m4_t test_vle64ff_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vle64ff_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m8_tum(mask, maskedoff, base, new_vl, 
vl); + return __riscv_vle64ff_v_f64m8_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_tum( @@ -208,7 +208,7 @@ vfloat64m8_t test_vle64ff_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vle64ff_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m1_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m1_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_tum( @@ -220,7 +220,7 @@ vint64m1_t test_vle64ff_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vle64ff_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_tum( @@ -232,7 +232,7 @@ vint64m2_t test_vle64ff_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vle64ff_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_tum( @@ -244,7 +244,7 @@ vint64m4_t test_vle64ff_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vle64ff_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m8_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m8_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_tum( @@ -256,7 +256,7 @@ vint64m8_t test_vle64ff_v_i64m8_tum(vbool8_t mask, 
vint64m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vle64ff_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m1_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m1_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_tum( @@ -268,7 +268,7 @@ vuint64m1_t test_vle64ff_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vle64ff_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_tum( @@ -280,7 +280,7 @@ vuint64m2_t test_vle64ff_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vle64ff_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_tum( @@ -292,7 +292,7 @@ vuint64m4_t test_vle64ff_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vle64ff_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m8_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m8_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_tumu( @@ -304,7 +304,7 @@ vuint64m8_t test_vle64ff_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vle64ff_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) 
{ - return vle64ff_v_f64m1_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_f64m1_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_tumu( @@ -316,7 +316,7 @@ vfloat64m1_t test_vle64ff_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vle64ff_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_f64m2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_tumu( @@ -328,7 +328,7 @@ vfloat64m2_t test_vle64ff_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vle64ff_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_f64m4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_tumu( @@ -340,7 +340,7 @@ vfloat64m4_t test_vle64ff_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vle64ff_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m8_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_f64m8_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_tumu( @@ -352,7 +352,7 @@ vfloat64m8_t test_vle64ff_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vle64ff_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m1_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m1_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vle64ff_v_i64m2_tumu( @@ -364,7 +364,7 @@ vint64m1_t test_vle64ff_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vle64ff_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_tumu( @@ -376,7 +376,7 @@ vint64m2_t test_vle64ff_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vle64ff_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_tumu( @@ -388,7 +388,7 @@ vint64m4_t test_vle64ff_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vle64ff_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m8_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m8_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_tumu( @@ -400,7 +400,7 @@ vint64m8_t test_vle64ff_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vle64ff_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m1_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m1_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_tumu( @@ -412,7 +412,7 @@ vuint64m1_t test_vle64ff_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t 
test_vle64ff_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_tumu( @@ -424,7 +424,7 @@ vuint64m2_t test_vle64ff_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vle64ff_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_tumu( @@ -436,7 +436,7 @@ vuint64m4_t test_vle64ff_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vle64ff_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m8_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m8_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_mu( @@ -448,7 +448,7 @@ vuint64m8_t test_vle64ff_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vle64ff_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m1_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_f64m1_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_mu( @@ -460,7 +460,7 @@ vfloat64m1_t test_vle64ff_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vle64ff_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m2_mu(mask, maskedoff, base, new_vl, vl); + return 
__riscv_vle64ff_v_f64m2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_mu( @@ -472,7 +472,7 @@ vfloat64m2_t test_vle64ff_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vle64ff_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_f64m4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_mu( @@ -484,7 +484,7 @@ vfloat64m4_t test_vle64ff_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vle64ff_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) { - return vle64ff_v_f64m8_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_f64m8_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_mu( @@ -496,7 +496,7 @@ vfloat64m8_t test_vle64ff_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vle64ff_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m1_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m1_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_mu( @@ -508,7 +508,7 @@ vint64m1_t test_vle64ff_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vle64ff_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_mu( @@ -520,7 +520,7 @@ vint64m2_t test_vle64ff_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const i 
// CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vle64ff_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_mu( @@ -532,7 +532,7 @@ vint64m4_t test_vle64ff_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vle64ff_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_i64m8_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_i64m8_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_mu( @@ -544,7 +544,7 @@ vint64m8_t test_vle64ff_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vle64ff_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m1_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m1_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_mu( @@ -556,7 +556,7 @@ vuint64m1_t test_vle64ff_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vle64ff_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_mu( @@ -568,7 +568,7 @@ vuint64m2_t test_vle64ff_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vle64ff_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m4_mu(mask, maskedoff, base, 
new_vl, vl); + return __riscv_vle64ff_v_u64m4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_mu( @@ -580,6 +580,6 @@ vuint64m4_t test_vle64ff_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vle64ff_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return vle64ff_v_u64m8_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle64ff_v_u64m8_mu(mask, maskedoff, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle8.c index 0fb6b2922848..217388ee89cd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vle8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf8_tu(maskedoff, base, vl); + return __riscv_vle8_v_i8mf8_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf4_tu( @@ -22,7 +22,7 @@ vint8mf8_t test_vle8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vle8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf4_tu(maskedoff, base, vl); + return __riscv_vle8_v_i8mf4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf2_tu( @@ -31,7 +31,7 @@ vint8mf4_t test_vle8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vle8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf2_tu(maskedoff, base, vl); + return __riscv_vle8_v_i8mf2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m1_tu( @@ -40,7 +40,7 @@ vint8mf2_t 
test_vle8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vle8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m1_tu(maskedoff, base, vl); + return __riscv_vle8_v_i8m1_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m2_tu( @@ -49,7 +49,7 @@ vint8m1_t test_vle8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vle8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m2_tu(maskedoff, base, vl); + return __riscv_vle8_v_i8m2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m4_tu( @@ -58,7 +58,7 @@ vint8m2_t test_vle8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vle8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m4_tu(maskedoff, base, vl); + return __riscv_vle8_v_i8m4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m8_tu( @@ -67,7 +67,7 @@ vint8m4_t test_vle8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vle8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m8_tu(maskedoff, base, vl); + return __riscv_vle8_v_i8m8_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_tu( @@ -76,7 +76,7 @@ vint8m8_t test_vle8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vle8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf8_tu(maskedoff, base, vl); + return __riscv_vle8_v_u8mf8_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf4_tu( @@ -85,7 +85,7 @@ vuint8mf8_t test_vle8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vle8_v_u8mf4_tu(vuint8mf4_t maskedoff, const 
uint8_t *base, size_t vl) { - return vle8_v_u8mf4_tu(maskedoff, base, vl); + return __riscv_vle8_v_u8mf4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf2_tu( @@ -94,7 +94,7 @@ vuint8mf4_t test_vle8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vle8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf2_tu(maskedoff, base, vl); + return __riscv_vle8_v_u8mf2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m1_tu( @@ -103,7 +103,7 @@ vuint8mf2_t test_vle8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vle8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m1_tu(maskedoff, base, vl); + return __riscv_vle8_v_u8m1_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m2_tu( @@ -112,7 +112,7 @@ vuint8m1_t test_vle8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vle8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m2_tu(maskedoff, base, vl); + return __riscv_vle8_v_u8m2_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m4_tu( @@ -121,7 +121,7 @@ vuint8m2_t test_vle8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vle8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m4_tu(maskedoff, base, vl); + return __riscv_vle8_v_u8m4_tu(maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m8_tu( @@ -130,7 +130,7 @@ vuint8m4_t test_vle8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vle8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m8_tu(maskedoff, base, vl); + return __riscv_vle8_v_u8m8_tu(maskedoff, base, vl); } // 
CHECK-RV64-LABEL: @test_vle8_v_i8mf8_tum( @@ -139,7 +139,7 @@ vuint8m8_t test_vle8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vle8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf8_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8mf8_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf4_tum( @@ -148,7 +148,7 @@ vint8mf8_t test_vle8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vle8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf4_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8mf4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf2_tum( @@ -157,7 +157,7 @@ vint8mf4_t test_vle8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vle8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf2_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8mf2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m1_tum( @@ -166,7 +166,7 @@ vint8mf2_t test_vle8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vle8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m1_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8m1_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m2_tum( @@ -175,7 +175,7 @@ vint8m1_t test_vle8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vle8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m2_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8m2_tum(mask, maskedoff, base, vl); 
} // CHECK-RV64-LABEL: @test_vle8_v_i8m4_tum( @@ -184,7 +184,7 @@ vint8m2_t test_vle8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vle8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m4_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8m4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m8_tum( @@ -193,7 +193,7 @@ vint8m4_t test_vle8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vle8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m8_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8m8_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_tum( @@ -202,7 +202,7 @@ vint8m8_t test_vle8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vle8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf8_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8mf8_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf4_tum( @@ -211,7 +211,7 @@ vuint8mf8_t test_vle8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vle8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf4_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8mf4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf2_tum( @@ -220,7 +220,7 @@ vuint8mf4_t test_vle8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vle8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf2_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8mf2_tum(mask, 
maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m1_tum( @@ -229,7 +229,7 @@ vuint8mf2_t test_vle8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vle8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m1_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8m1_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m2_tum( @@ -238,7 +238,7 @@ vuint8m1_t test_vle8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vle8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m2_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8m2_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m4_tum( @@ -247,7 +247,7 @@ vuint8m2_t test_vle8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vle8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m4_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8m4_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m8_tum( @@ -256,7 +256,7 @@ vuint8m4_t test_vle8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vle8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m8_tum(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8m8_tum(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf8_tumu( @@ -265,7 +265,7 @@ vuint8m8_t test_vle8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vle8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf8_tumu(mask, maskedoff, base, vl); + return 
__riscv_vle8_v_i8mf8_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf4_tumu( @@ -274,7 +274,7 @@ vint8mf8_t test_vle8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vle8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf4_tumu(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8mf4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf2_tumu( @@ -283,7 +283,7 @@ vint8mf4_t test_vle8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vle8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf2_tumu(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8mf2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m1_tumu( @@ -292,7 +292,7 @@ vint8mf2_t test_vle8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vle8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m1_tumu(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8m1_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m2_tumu( @@ -301,7 +301,7 @@ vint8m1_t test_vle8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vle8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m2_tumu(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8m2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m4_tumu( @@ -310,7 +310,7 @@ vint8m2_t test_vle8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vle8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m4_tumu(mask, maskedoff, 
base, vl); + return __riscv_vle8_v_i8m4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m8_tumu( @@ -319,7 +319,7 @@ vint8m4_t test_vle8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vle8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m8_tumu(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8m8_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_tumu( @@ -328,7 +328,7 @@ vint8m8_t test_vle8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vle8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf8_tumu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8mf8_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf4_tumu( @@ -337,7 +337,7 @@ vuint8mf8_t test_vle8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vle8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf4_tumu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8mf4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf2_tumu( @@ -346,7 +346,7 @@ vuint8mf4_t test_vle8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vle8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf2_tumu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8mf2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m1_tumu( @@ -355,7 +355,7 @@ vuint8mf2_t test_vle8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vle8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - 
return vle8_v_u8m1_tumu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8m1_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m2_tumu( @@ -364,7 +364,7 @@ vuint8m1_t test_vle8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vle8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m2_tumu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8m2_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m4_tumu( @@ -373,7 +373,7 @@ vuint8m2_t test_vle8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vle8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m4_tumu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8m4_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m8_tumu( @@ -382,7 +382,7 @@ vuint8m4_t test_vle8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vle8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m8_tumu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8m8_tumu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf8_mu( @@ -391,7 +391,7 @@ vuint8m8_t test_vle8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vle8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf8_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8mf8_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf4_mu( @@ -400,7 +400,7 @@ vint8mf8_t test_vle8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vle8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, 
size_t vl) { - return vle8_v_i8mf4_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8mf4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8mf2_mu( @@ -409,7 +409,7 @@ vint8mf4_t test_vle8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vle8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8mf2_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8mf2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m1_mu( @@ -418,7 +418,7 @@ vint8mf2_t test_vle8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vle8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m1_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8m1_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m2_mu( @@ -427,7 +427,7 @@ vint8m1_t test_vle8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t * // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vle8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m2_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8m2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m4_mu( @@ -436,7 +436,7 @@ vint8m2_t test_vle8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t * // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vle8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return vle8_v_i8m4_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8m4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_i8m8_mu( @@ -445,7 +445,7 @@ vint8m4_t test_vle8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t * // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vle8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { - return 
vle8_v_i8m8_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_i8m8_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_mu( @@ -454,7 +454,7 @@ vint8m8_t test_vle8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t * // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vle8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf8_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8mf8_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf4_mu( @@ -463,7 +463,7 @@ vuint8mf8_t test_vle8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vle8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf4_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8mf4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8mf2_mu( @@ -472,7 +472,7 @@ vuint8mf4_t test_vle8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vle8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8mf2_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8mf2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m1_mu( @@ -481,7 +481,7 @@ vuint8mf2_t test_vle8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vle8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m1_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8m1_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m2_mu( @@ -490,7 +490,7 @@ vuint8m1_t test_vle8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vle8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - 
return vle8_v_u8m2_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8m2_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m4_mu( @@ -499,7 +499,7 @@ vuint8m2_t test_vle8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vle8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m4_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8m4_mu(mask, maskedoff, base, vl); } // CHECK-RV64-LABEL: @test_vle8_v_u8m8_mu( @@ -508,6 +508,6 @@ vuint8m4_t test_vle8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vle8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8_v_u8m8_mu(mask, maskedoff, base, vl); + return __riscv_vle8_v_u8m8_mu(mask, maskedoff, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle8ff.c index e6e6488b073c..3b000648d8ba 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vle8ff.c @@ -16,7 +16,7 @@ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vle8ff_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf8_tu(maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf8_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_tu( @@ -28,7 +28,7 @@ vint8mf8_t test_vle8ff_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, size // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vle8ff_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf4_tu(maskedoff, base, new_vl, 
vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_tu( @@ -40,7 +40,7 @@ vint8mf4_t test_vle8ff_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, size // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vle8ff_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_tu( @@ -52,7 +52,7 @@ vint8mf2_t test_vle8ff_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, size // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vle8ff_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m1_tu(maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m1_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_tu( @@ -64,7 +64,7 @@ vint8m1_t test_vle8ff_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vle8ff_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_tu( @@ -76,7 +76,7 @@ vint8m2_t test_vle8ff_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vle8ff_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_tu( @@ -88,7 +88,7 @@ vint8m4_t test_vle8ff_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vle8ff_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m8_tu(maskedoff, base, new_vl, vl); + return 
__riscv_vle8ff_v_i8m8_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_tu( @@ -100,7 +100,7 @@ vint8m8_t test_vle8ff_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, size_t // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vle8ff_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf8_tu(maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf8_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_tu( @@ -112,7 +112,7 @@ vuint8mf8_t test_vle8ff_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, s // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vle8ff_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_tu( @@ -124,7 +124,7 @@ vuint8mf4_t test_vle8ff_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, s // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vle8ff_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_tu( @@ -136,7 +136,7 @@ vuint8mf2_t test_vle8ff_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, s // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vle8ff_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m1_tu(maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m1_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_tu( @@ -148,7 +148,7 @@ vuint8m1_t test_vle8ff_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, size // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vle8ff_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - 
return vle8ff_v_u8m2_tu(maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m2_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_tu( @@ -160,7 +160,7 @@ vuint8m2_t test_vle8ff_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, size // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vle8ff_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m4_tu(maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m4_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_tu( @@ -172,7 +172,7 @@ vuint8m4_t test_vle8ff_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, size // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vle8ff_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m8_tu(maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m8_tu(maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_tum( @@ -184,7 +184,7 @@ vuint8m8_t test_vle8ff_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, size // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vle8ff_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf8_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf8_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_tum( @@ -196,7 +196,7 @@ vint8mf8_t test_vle8ff_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vle8ff_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_tum( @@ -208,7 +208,7 @@ vint8mf4_t test_vle8ff_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const i // CHECK-RV64-NEXT: ret 
[[TMP1]] // vint8mf2_t test_vle8ff_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_tum( @@ -220,7 +220,7 @@ vint8mf2_t test_vle8ff_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vle8ff_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m1_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m1_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_tum( @@ -232,7 +232,7 @@ vint8m1_t test_vle8ff_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vle8ff_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_tum( @@ -244,7 +244,7 @@ vint8m2_t test_vle8ff_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vle8ff_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_tum( @@ -256,7 +256,7 @@ vint8m4_t test_vle8ff_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vle8ff_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m8_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m8_tum(mask, maskedoff, 
base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_tum( @@ -268,7 +268,7 @@ vint8m8_t test_vle8ff_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vle8ff_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf8_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf8_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_tum( @@ -280,7 +280,7 @@ vuint8mf8_t test_vle8ff_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vle8ff_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_tum( @@ -292,7 +292,7 @@ vuint8mf4_t test_vle8ff_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vle8ff_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_tum( @@ -304,7 +304,7 @@ vuint8mf2_t test_vle8ff_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vle8ff_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m1_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m1_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_tum( @@ -316,7 +316,7 @@ vuint8m1_t test_vle8ff_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t 
test_vle8ff_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m2_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m2_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_tum( @@ -328,7 +328,7 @@ vuint8m2_t test_vle8ff_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vle8ff_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m4_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m4_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_tum( @@ -340,7 +340,7 @@ vuint8m4_t test_vle8ff_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vle8ff_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m8_tum(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m8_tum(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_tumu( @@ -352,7 +352,7 @@ vuint8m8_t test_vle8ff_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vle8ff_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf8_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf8_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_tumu( @@ -364,7 +364,7 @@ vint8mf8_t test_vle8ff_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vle8ff_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf4_tumu(mask, maskedoff, 
base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_tumu( @@ -376,7 +376,7 @@ vint8mf4_t test_vle8ff_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vle8ff_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_tumu( @@ -388,7 +388,7 @@ vint8mf2_t test_vle8ff_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vle8ff_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m1_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m1_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_tumu( @@ -400,7 +400,7 @@ vint8m1_t test_vle8ff_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vle8ff_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_tumu( @@ -412,7 +412,7 @@ vint8m2_t test_vle8ff_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vle8ff_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_tumu( @@ -424,7 +424,7 @@ vint8m4_t test_vle8ff_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t 
test_vle8ff_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m8_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m8_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_tumu( @@ -436,7 +436,7 @@ vint8m8_t test_vle8ff_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vle8ff_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf8_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf8_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_tumu( @@ -448,7 +448,7 @@ vuint8mf8_t test_vle8ff_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vle8ff_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_tumu( @@ -460,7 +460,7 @@ vuint8mf4_t test_vle8ff_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vle8ff_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_tumu( @@ -472,7 +472,7 @@ vuint8mf2_t test_vle8ff_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vle8ff_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m1_tumu(mask, maskedoff, base, new_vl, vl); + return 
__riscv_vle8ff_v_u8m1_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_tumu( @@ -484,7 +484,7 @@ vuint8m1_t test_vle8ff_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vle8ff_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m2_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m2_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_tumu( @@ -496,7 +496,7 @@ vuint8m2_t test_vle8ff_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vle8ff_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m4_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m4_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_tumu( @@ -508,7 +508,7 @@ vuint8m4_t test_vle8ff_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vle8ff_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m8_tumu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m8_tumu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_mu( @@ -520,7 +520,7 @@ vuint8m8_t test_vle8ff_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vle8ff_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf8_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf8_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_mu( @@ -532,7 +532,7 @@ vint8mf8_t test_vle8ff_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const in // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vle8ff_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_mu( @@ -544,7 +544,7 @@ vint8mf4_t test_vle8ff_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vle8ff_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8mf2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8mf2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_mu( @@ -556,7 +556,7 @@ vint8mf2_t test_vle8ff_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vle8ff_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m1_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m1_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_mu( @@ -568,7 +568,7 @@ vint8m1_t test_vle8ff_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vle8ff_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_mu( @@ -580,7 +580,7 @@ vint8m2_t test_vle8ff_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vle8ff_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m4_mu(mask, 
maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_mu( @@ -592,7 +592,7 @@ vint8m4_t test_vle8ff_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vle8ff_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_i8m8_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_i8m8_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_mu( @@ -604,7 +604,7 @@ vint8m8_t test_vle8ff_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vle8ff_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf8_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf8_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_mu( @@ -616,7 +616,7 @@ vuint8mf8_t test_vle8ff_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vle8ff_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_mu( @@ -628,7 +628,7 @@ vuint8mf4_t test_vle8ff_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vle8ff_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8mf2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8mf2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_mu( @@ -640,7 +640,7 @@ vuint8mf2_t test_vle8ff_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t 
test_vle8ff_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m1_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m1_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_mu( @@ -652,7 +652,7 @@ vuint8m1_t test_vle8ff_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vle8ff_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m2_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m2_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_mu( @@ -664,7 +664,7 @@ vuint8m2_t test_vle8ff_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vle8ff_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m4_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m4_mu(mask, maskedoff, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_mu( @@ -676,6 +676,6 @@ vuint8m4_t test_vle8ff_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vle8ff_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return vle8ff_v_u8m8_mu(mask, maskedoff, base, new_vl, vl); + return __riscv_vle8ff_v_u8m8_mu(mask, maskedoff, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei16.c index 0db8f629450f..d192b1aab635 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei16.c @@ -13,7 +13,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m4_tu(maskedoff, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vloxei16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vloxei16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_f16m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vloxei16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei16_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei16_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vloxei16_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei16_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vloxei16_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei16_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, 
size_t vl) { - return vloxei16_v_f32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vloxei16_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei16_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vloxei16_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei16_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vloxei16_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei16_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vloxei16_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei16_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vloxei16_v_f64m4_tu(vfloat64m4_t maskedoff, const double 
*base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei16_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_tu( @@ -148,7 +148,7 @@ vfloat64m8_t test_vloxei16_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei16_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_tu( @@ -157,7 +157,7 @@ vint8mf8_t test_vloxei16_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei16_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_tu( @@ -166,7 +166,7 @@ vint8mf4_t test_vloxei16_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei16_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_tu( @@ -175,7 +175,7 @@ vint8mf2_t test_vloxei16_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei16_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei16_v_i8m2_tu( @@ -184,7 +184,7 @@ vint8m1_t test_vloxei16_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei16_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i8m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_tu( @@ -193,7 +193,7 @@ vint8m2_t test_vloxei16_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vloxei16_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i8m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_tu( @@ -202,7 +202,7 @@ vint8m4_t test_vloxei16_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_tu( @@ -211,7 +211,7 @@ vint16mf4_t test_vloxei16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_tu( @@ -220,7 +220,7 @@ vint16mf2_t test_vloxei16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return 
vloxei16_v_i16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_tu( @@ -229,7 +229,7 @@ vint16m1_t test_vloxei16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_tu( @@ -238,7 +238,7 @@ vint16m2_t test_vloxei16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_tu( @@ -247,7 +247,7 @@ vint16m4_t test_vloxei16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vloxei16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i16m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_tu( @@ -256,7 +256,7 @@ vint16m8_t test_vloxei16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei16_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_tu( @@ -265,7 +265,7 @@ vint32mf2_t test_vloxei16_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m1_t test_vloxei16_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_tu( @@ -274,7 +274,7 @@ vint32m1_t test_vloxei16_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei16_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_tu( @@ -283,7 +283,7 @@ vint32m2_t test_vloxei16_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei16_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_tu( @@ -292,7 +292,7 @@ vint32m4_t test_vloxei16_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei16_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_tu( @@ -301,7 +301,7 @@ vint32m8_t test_vloxei16_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei16_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei16_v_i64m2_tu( @@ -310,7 +310,7 @@ vint64m1_t test_vloxei16_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei16_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_tu( @@ -319,7 +319,7 @@ vint64m2_t test_vloxei16_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei16_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_tu( @@ -328,7 +328,7 @@ vint64m4_t test_vloxei16_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei16_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_tu( @@ -337,7 +337,7 @@ vint64m8_t test_vloxei16_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei16_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_tu( @@ -346,7 +346,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei16_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return 
vloxei16_v_u8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_tu( @@ -355,7 +355,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei16_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_tu( @@ -364,7 +364,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei16_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_tu( @@ -373,7 +373,7 @@ vuint8m1_t test_vloxei16_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei16_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u8m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_tu( @@ -382,7 +382,7 @@ vuint8m2_t test_vloxei16_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vloxei16_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u8m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_tu( @@ -391,7 +391,7 @@ vuint8m4_t test_vloxei16_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf4_t test_vloxei16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_tu( @@ -400,7 +400,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_tu( @@ -409,7 +409,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_tu( @@ -418,7 +418,7 @@ vuint16m1_t test_vloxei16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_tu( @@ -427,7 +427,7 @@ vuint16m2_t test_vloxei16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei16_v_u16m8_tu( @@ -436,7 +436,7 @@ vuint16m4_t test_vloxei16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vloxei16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u16m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_tu( @@ -445,7 +445,7 @@ vuint16m8_t test_vloxei16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei16_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_tu( @@ -454,7 +454,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei16_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_tu( @@ -463,7 +463,7 @@ vuint32m1_t test_vloxei16_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei16_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_tu( @@ -472,7 +472,7 @@ vuint32m2_t test_vloxei16_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei16_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { 
- return vloxei16_v_u32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_tu( @@ -481,7 +481,7 @@ vuint32m4_t test_vloxei16_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei16_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_tu( @@ -490,7 +490,7 @@ vuint32m8_t test_vloxei16_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei16_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_tu( @@ -499,7 +499,7 @@ vuint64m1_t test_vloxei16_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei16_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_tu( @@ -508,7 +508,7 @@ vuint64m2_t test_vloxei16_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei16_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_tu( @@ -517,7 +517,7 @@ vuint64m4_t test_vloxei16_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei16_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_tum( @@ -526,7 +526,7 @@ vuint64m8_t test_vloxei16_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_tum( @@ -535,7 +535,7 @@ vfloat16mf4_t test_vloxei16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_tum( @@ -544,7 +544,7 @@ vfloat16mf2_t test_vloxei16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_tum( @@ -553,7 +553,7 @@ vfloat16m1_t test_vloxei16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - 
return vloxei16_v_f16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_tum( @@ -562,7 +562,7 @@ vfloat16m2_t test_vloxei16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_tum( @@ -571,7 +571,7 @@ vfloat16m4_t test_vloxei16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vloxei16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_f16m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_tum( @@ -580,7 +580,7 @@ vfloat16m8_t test_vloxei16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei16_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_tum( @@ -589,7 +589,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei16_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m1_tum(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_tum( @@ -598,7 +598,7 @@ vfloat32m1_t test_vloxei16_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei16_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_tum( @@ -607,7 +607,7 @@ vfloat32m2_t test_vloxei16_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei16_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_tum( @@ -616,7 +616,7 @@ vfloat32m4_t test_vloxei16_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei16_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_tum( @@ -625,7 +625,7 @@ vfloat32m8_t test_vloxei16_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei16_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_tum( @@ -634,7 +634,7 @@ vfloat64m1_t test_vloxei16_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, c // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei16_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_tum( @@ -643,7 +643,7 @@ vfloat64m2_t test_vloxei16_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei16_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_tum( @@ -652,7 +652,7 @@ vfloat64m4_t test_vloxei16_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei16_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_tum( @@ -661,7 +661,7 @@ vfloat64m8_t test_vloxei16_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei16_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_tum( @@ -670,7 +670,7 @@ vint8mf8_t test_vloxei16_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei16_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { 
- return vloxei16_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_tum( @@ -679,7 +679,7 @@ vint8mf4_t test_vloxei16_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei16_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_tum( @@ -688,7 +688,7 @@ vint8mf2_t test_vloxei16_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei16_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_tum( @@ -697,7 +697,7 @@ vint8m1_t test_vloxei16_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei16_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i8m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_tum( @@ -706,7 +706,7 @@ vint8m2_t test_vloxei16_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vloxei16_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i8m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_tum( @@ 
-715,7 +715,7 @@ vint8m4_t test_vloxei16_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_tum( @@ -724,7 +724,7 @@ vint16mf4_t test_vloxei16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_tum( @@ -733,7 +733,7 @@ vint16mf2_t test_vloxei16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_tum( @@ -742,7 +742,7 @@ vint16m1_t test_vloxei16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_tum( @@ -751,7 +751,7 @@ vint16m2_t test_vloxei16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vloxei16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_tum( @@ -760,7 +760,7 @@ vint16m4_t test_vloxei16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vloxei16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i16m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_tum( @@ -769,7 +769,7 @@ vint16m8_t test_vloxei16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei16_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_tum( @@ -778,7 +778,7 @@ vint32mf2_t test_vloxei16_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei16_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_tum( @@ -787,7 +787,7 @@ vint32m1_t test_vloxei16_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei16_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i32m2_tum(mask, maskedoff, 
base, bindex, vl); + return __riscv_vloxei16_v_i32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_tum( @@ -796,7 +796,7 @@ vint32m2_t test_vloxei16_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei16_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_tum( @@ -805,7 +805,7 @@ vint32m4_t test_vloxei16_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei16_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_tum( @@ -814,7 +814,7 @@ vint32m8_t test_vloxei16_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei16_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_tum( @@ -823,7 +823,7 @@ vint64m1_t test_vloxei16_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei16_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_tum( @@ -832,7 +832,7 @@ 
vint64m2_t test_vloxei16_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei16_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_tum( @@ -841,7 +841,7 @@ vint64m4_t test_vloxei16_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei16_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_tum( @@ -850,7 +850,7 @@ vint64m8_t test_vloxei16_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei16_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_tum( @@ -859,7 +859,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei16_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_tum( @@ -868,7 +868,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei16_v_u8mf2_tum(vbool16_t 
mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_tum( @@ -877,7 +877,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei16_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_tum( @@ -886,7 +886,7 @@ vuint8m1_t test_vloxei16_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei16_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u8m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_tum( @@ -895,7 +895,7 @@ vuint8m2_t test_vloxei16_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vloxei16_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u8m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_tum( @@ -904,7 +904,7 @@ vuint8m4_t test_vloxei16_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); + return 
__riscv_vloxei16_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_tum( @@ -913,7 +913,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_tum( @@ -922,7 +922,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_tum( @@ -931,7 +931,7 @@ vuint16m1_t test_vloxei16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_tum( @@ -940,7 +940,7 @@ vuint16m2_t test_vloxei16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_tum( @@ -949,7 +949,7 @@ vuint16m4_t 
test_vloxei16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vloxei16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u16m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_tum( @@ -958,7 +958,7 @@ vuint16m8_t test_vloxei16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei16_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_tum( @@ -967,7 +967,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei16_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_tum( @@ -976,7 +976,7 @@ vuint32m1_t test_vloxei16_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei16_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_tum( @@ -985,7 +985,7 @@ vuint32m2_t test_vloxei16_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei16_v_u32m4_tum(vbool8_t 
mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_tum( @@ -994,7 +994,7 @@ vuint32m4_t test_vloxei16_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei16_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_tum( @@ -1003,7 +1003,7 @@ vuint32m8_t test_vloxei16_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei16_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_tum( @@ -1012,7 +1012,7 @@ vuint64m1_t test_vloxei16_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei16_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_tum( @@ -1021,7 +1021,7 @@ vuint64m2_t test_vloxei16_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei16_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u64m4_tum(mask, maskedoff, base, bindex, vl); + 
return __riscv_vloxei16_v_u64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_tum( @@ -1030,7 +1030,7 @@ vuint64m4_t test_vloxei16_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei16_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_tumu( @@ -1039,7 +1039,7 @@ vuint64m8_t test_vloxei16_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_tumu( @@ -1048,7 +1048,7 @@ vfloat16mf4_t test_vloxei16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_tumu( @@ -1057,7 +1057,7 @@ vfloat16mf2_t test_vloxei16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei16_v_f16m2_tumu( @@ -1066,7 +1066,7 @@ vfloat16m1_t test_vloxei16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_tumu( @@ -1075,7 +1075,7 @@ vfloat16m2_t test_vloxei16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_tumu( @@ -1084,7 +1084,7 @@ vfloat16m4_t test_vloxei16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vloxei16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_f16m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_tumu( @@ -1093,7 +1093,7 @@ vfloat16m8_t test_vloxei16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei16_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_tumu( @@ -1102,7 +1102,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t 
maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei16_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_tumu( @@ -1111,7 +1111,7 @@ vfloat32m1_t test_vloxei16_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei16_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_tumu( @@ -1120,7 +1120,7 @@ vfloat32m2_t test_vloxei16_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei16_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_tumu( @@ -1129,7 +1129,7 @@ vfloat32m4_t test_vloxei16_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei16_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_tumu( @@ -1138,7 +1138,7 @@ vfloat32m8_t test_vloxei16_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei16_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double 
*base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_tumu( @@ -1147,7 +1147,7 @@ vfloat64m1_t test_vloxei16_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei16_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_tumu( @@ -1156,7 +1156,7 @@ vfloat64m2_t test_vloxei16_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei16_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_tumu( @@ -1165,7 +1165,7 @@ vfloat64m4_t test_vloxei16_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei16_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_tumu( @@ -1174,7 +1174,7 @@ vfloat64m8_t test_vloxei16_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei16_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vloxei16_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_tumu( @@ -1183,7 +1183,7 @@ vint8mf8_t test_vloxei16_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei16_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_tumu( @@ -1192,7 +1192,7 @@ vint8mf4_t test_vloxei16_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei16_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_tumu( @@ -1201,7 +1201,7 @@ vint8mf2_t test_vloxei16_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei16_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_tumu( @@ -1210,7 +1210,7 @@ vint8m1_t test_vloxei16_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei16_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_tumu( @@ -1219,7 +1219,7 @@ vint8m2_t 
test_vloxei16_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vloxei16_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i8m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_tumu( @@ -1228,7 +1228,7 @@ vint8m4_t test_vloxei16_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_tumu( @@ -1237,7 +1237,7 @@ vint16mf4_t test_vloxei16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_tumu( @@ -1246,7 +1246,7 @@ vint16mf2_t test_vloxei16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_tumu( @@ -1255,7 +1255,7 @@ vint16m1_t test_vloxei16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vloxei16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_tumu( @@ -1264,7 +1264,7 @@ vint16m2_t test_vloxei16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_tumu( @@ -1273,7 +1273,7 @@ vint16m4_t test_vloxei16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vloxei16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i16m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_tumu( @@ -1282,7 +1282,7 @@ vint16m8_t test_vloxei16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei16_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_tumu( @@ -1291,7 +1291,7 @@ vint32mf2_t test_vloxei16_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei16_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return 
vloxei16_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_tumu( @@ -1300,7 +1300,7 @@ vint32m1_t test_vloxei16_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei16_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_tumu( @@ -1309,7 +1309,7 @@ vint32m2_t test_vloxei16_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei16_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_tumu( @@ -1318,7 +1318,7 @@ vint32m4_t test_vloxei16_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei16_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_tumu( @@ -1327,7 +1327,7 @@ vint32m8_t test_vloxei16_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei16_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_tumu( @@ -1336,7 +1336,7 @@ vint64m1_t test_vloxei16_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei16_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_tumu( @@ -1345,7 +1345,7 @@ vint64m2_t test_vloxei16_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei16_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_tumu( @@ -1354,7 +1354,7 @@ vint64m4_t test_vloxei16_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei16_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_tumu( @@ -1363,7 +1363,7 @@ vint64m8_t test_vloxei16_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei16_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_tumu( @@ -1372,7 +1372,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t 
maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei16_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_tumu( @@ -1381,7 +1381,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei16_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_tumu( @@ -1390,7 +1390,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei16_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_tumu( @@ -1399,7 +1399,7 @@ vuint8m1_t test_vloxei16_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei16_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_tumu( @@ -1408,7 +1408,7 @@ vuint8m2_t test_vloxei16_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vloxei16_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, 
vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u8m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_tumu( @@ -1417,7 +1417,7 @@ vuint8m4_t test_vloxei16_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_tumu( @@ -1426,7 +1426,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_tumu( @@ -1435,7 +1435,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_tumu( @@ -1444,7 +1444,7 @@ vuint16m1_t test_vloxei16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vloxei16_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_tumu( @@ -1453,7 +1453,7 @@ vuint16m2_t test_vloxei16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_tumu( @@ -1462,7 +1462,7 @@ vuint16m4_t test_vloxei16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vloxei16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u16m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_tumu( @@ -1471,7 +1471,7 @@ vuint16m8_t test_vloxei16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei16_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_tumu( @@ -1480,7 +1480,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei16_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_tumu( @@ -1489,7 
+1489,7 @@ vuint32m1_t test_vloxei16_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei16_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_tumu( @@ -1498,7 +1498,7 @@ vuint32m2_t test_vloxei16_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei16_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_tumu( @@ -1507,7 +1507,7 @@ vuint32m4_t test_vloxei16_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei16_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_tumu( @@ -1516,7 +1516,7 @@ vuint32m8_t test_vloxei16_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei16_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_tumu( @@ -1525,7 +1525,7 @@ vuint64m1_t test_vloxei16_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m2_t test_vloxei16_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_tumu( @@ -1534,7 +1534,7 @@ vuint64m2_t test_vloxei16_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei16_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_tumu( @@ -1543,7 +1543,7 @@ vuint64m4_t test_vloxei16_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei16_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_mu( @@ -1552,7 +1552,7 @@ vuint64m8_t test_vloxei16_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_mu( @@ -1561,7 +1561,7 @@ vfloat16mf4_t test_vloxei16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) 
{ - return vloxei16_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_mu( @@ -1570,7 +1570,7 @@ vfloat16mf2_t test_vloxei16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_mu( @@ -1579,7 +1579,7 @@ vfloat16m1_t test_vloxei16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_mu( @@ -1588,7 +1588,7 @@ vfloat16m2_t test_vloxei16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_mu( @@ -1597,7 +1597,7 @@ vfloat16m4_t test_vloxei16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vloxei16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_f16m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f16m8_mu(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_mu( @@ -1606,7 +1606,7 @@ vfloat16m8_t test_vloxei16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei16_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_mu( @@ -1615,7 +1615,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei16_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_mu( @@ -1624,7 +1624,7 @@ vfloat32m1_t test_vloxei16_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei16_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_mu( @@ -1633,7 +1633,7 @@ vfloat32m2_t test_vloxei16_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei16_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_mu( @@ -1642,7 +1642,7 @@ vfloat32m4_t test_vloxei16_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, con // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei16_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_mu( @@ -1651,7 +1651,7 @@ vfloat32m8_t test_vloxei16_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei16_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_mu( @@ -1660,7 +1660,7 @@ vfloat64m1_t test_vloxei16_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei16_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_mu( @@ -1669,7 +1669,7 @@ vfloat64m2_t test_vloxei16_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei16_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_mu( @@ -1678,7 +1678,7 @@ vfloat64m4_t test_vloxei16_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei16_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - 
return vloxei16_v_f64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_f64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_mu( @@ -1687,7 +1687,7 @@ vfloat64m8_t test_vloxei16_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei16_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_mu( @@ -1696,7 +1696,7 @@ vint8mf8_t test_vloxei16_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei16_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_mu( @@ -1705,7 +1705,7 @@ vint8mf4_t test_vloxei16_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei16_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_mu( @@ -1714,7 +1714,7 @@ vint8mf2_t test_vloxei16_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei16_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_mu( @@ 
-1723,7 +1723,7 @@ vint8m1_t test_vloxei16_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei16_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i8m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_mu( @@ -1732,7 +1732,7 @@ vint8m2_t test_vloxei16_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vloxei16_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i8m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i8m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_mu( @@ -1741,7 +1741,7 @@ vint8m4_t test_vloxei16_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_mu( @@ -1750,7 +1750,7 @@ vint16mf4_t test_vloxei16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_mu( @@ -1759,7 +1759,7 @@ vint16mf2_t test_vloxei16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vloxei16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_mu( @@ -1768,7 +1768,7 @@ vint16m1_t test_vloxei16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_mu( @@ -1777,7 +1777,7 @@ vint16m2_t test_vloxei16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_mu( @@ -1786,7 +1786,7 @@ vint16m4_t test_vloxei16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vloxei16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_i16m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i16m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_mu( @@ -1795,7 +1795,7 @@ vint16m8_t test_vloxei16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei16_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i32mf2_mu(mask, maskedoff, base, bindex, 
vl); + return __riscv_vloxei16_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_mu( @@ -1804,7 +1804,7 @@ vint32mf2_t test_vloxei16_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei16_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_mu( @@ -1813,7 +1813,7 @@ vint32m1_t test_vloxei16_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei16_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_mu( @@ -1822,7 +1822,7 @@ vint32m2_t test_vloxei16_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei16_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_mu( @@ -1831,7 +1831,7 @@ vint32m4_t test_vloxei16_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei16_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_i32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_mu( @@ -1840,7 +1840,7 @@ vint32m8_t 
test_vloxei16_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei16_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_i64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_mu( @@ -1849,7 +1849,7 @@ vint64m1_t test_vloxei16_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei16_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_i64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_mu( @@ -1858,7 +1858,7 @@ vint64m2_t test_vloxei16_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei16_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_i64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_mu( @@ -1867,7 +1867,7 @@ vint64m4_t test_vloxei16_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei16_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_i64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_i64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_mu( @@ -1876,7 +1876,7 @@ vint64m8_t test_vloxei16_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei16_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, 
const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_mu( @@ -1885,7 +1885,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei16_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_mu( @@ -1894,7 +1894,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei16_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_mu( @@ -1903,7 +1903,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei16_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_mu( @@ -1912,7 +1912,7 @@ vuint8m1_t test_vloxei16_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei16_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u8m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8m2_mu(mask, maskedoff, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_mu( @@ -1921,7 +1921,7 @@ vuint8m2_t test_vloxei16_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vloxei16_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u8m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u8m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_mu( @@ -1930,7 +1930,7 @@ vuint8m4_t test_vloxei16_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_mu( @@ -1939,7 +1939,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_mu( @@ -1948,7 +1948,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_mu( @@ -1957,7 +1957,7 @@ vuint16m1_t test_vloxei16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, 
cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_mu( @@ -1966,7 +1966,7 @@ vuint16m2_t test_vloxei16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_mu( @@ -1975,7 +1975,7 @@ vuint16m4_t test_vloxei16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vloxei16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_u16m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u16m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_mu( @@ -1984,7 +1984,7 @@ vuint16m8_t test_vloxei16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei16_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_mu( @@ -1993,7 +1993,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei16_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, 
size_t vl) { - return vloxei16_v_u32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_mu( @@ -2002,7 +2002,7 @@ vuint32m1_t test_vloxei16_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei16_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_mu( @@ -2011,7 +2011,7 @@ vuint32m2_t test_vloxei16_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei16_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_mu( @@ -2020,7 +2020,7 @@ vuint32m4_t test_vloxei16_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei16_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_u32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_mu( @@ -2029,7 +2029,7 @@ vuint32m8_t test_vloxei16_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei16_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_u64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m1_mu(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_mu( @@ -2038,7 +2038,7 @@ vuint64m1_t test_vloxei16_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei16_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_u64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_mu( @@ -2047,7 +2047,7 @@ vuint64m2_t test_vloxei16_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei16_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_u64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_mu( @@ -2056,6 +2056,6 @@ vuint64m4_t test_vloxei16_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei16_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_u64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei16_v_u64m8_mu(mask, maskedoff, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei32.c index d1f7660bca21..c95de32b0c19 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei32_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return 
vloxei32_v_f16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei32_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei32_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei32_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei32_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei32_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei32_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei32_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei32_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vloxei32_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *ba // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_tu( @@ -67,7 +67,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_tu( @@ -76,7 +76,7 @@ vfloat32m1_t test_vloxei32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_tu( @@ -85,7 +85,7 @@ vfloat32m2_t test_vloxei32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_tu( @@ -94,7 +94,7 @@ vfloat32m4_t test_vloxei32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m8_tu(maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_tu( @@ -103,7 +103,7 @@ vfloat32m8_t test_vloxei32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei32_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_tu( @@ -112,7 +112,7 @@ vfloat64m1_t test_vloxei32_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei32_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_tu( @@ -121,7 +121,7 @@ vfloat64m2_t test_vloxei32_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei32_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_tu( @@ -130,7 +130,7 @@ vfloat64m4_t test_vloxei32_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei32_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_tu( @@ -139,7 +139,7 @@ vfloat64m8_t test_vloxei32_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei32_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, 
size_t vl) { - return vloxei32_v_i8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_tu( @@ -148,7 +148,7 @@ vint8mf8_t test_vloxei32_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei32_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_tu( @@ -157,7 +157,7 @@ vint8mf4_t test_vloxei32_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei32_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_tu( @@ -166,7 +166,7 @@ vint8mf2_t test_vloxei32_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei32_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_tu( @@ -175,7 +175,7 @@ vint8m1_t test_vloxei32_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei32_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i8m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_tu( @@ -184,7 +184,7 @@ vint8m2_t test_vloxei32_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16mf4_t test_vloxei32_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_tu( @@ -193,7 +193,7 @@ vint16mf4_t test_vloxei32_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei32_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_tu( @@ -202,7 +202,7 @@ vint16mf2_t test_vloxei32_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei32_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_tu( @@ -211,7 +211,7 @@ vint16m1_t test_vloxei32_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei32_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_tu( @@ -220,7 +220,7 @@ vint16m2_t test_vloxei32_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei32_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei32_v_i32mf2_tu( @@ -229,7 +229,7 @@ vint16m4_t test_vloxei32_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_tu( @@ -238,7 +238,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_tu( @@ -247,7 +247,7 @@ vint32m1_t test_vloxei32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_tu( @@ -256,7 +256,7 @@ vint32m2_t test_vloxei32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_tu( @@ -265,7 +265,7 @@ vint32m4_t test_vloxei32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return 
vloxei32_v_i32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_tu( @@ -274,7 +274,7 @@ vint32m8_t test_vloxei32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei32_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_tu( @@ -283,7 +283,7 @@ vint64m1_t test_vloxei32_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei32_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_tu( @@ -292,7 +292,7 @@ vint64m2_t test_vloxei32_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei32_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_tu( @@ -301,7 +301,7 @@ vint64m4_t test_vloxei32_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei32_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_tu( @@ -310,7 +310,7 @@ vint64m8_t test_vloxei32_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf8_t test_vloxei32_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_tu( @@ -319,7 +319,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei32_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_tu( @@ -328,7 +328,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei32_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_tu( @@ -337,7 +337,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei32_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_tu( @@ -346,7 +346,7 @@ vuint8m1_t test_vloxei32_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei32_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u8m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_tu( 
@@ -355,7 +355,7 @@ vuint8m2_t test_vloxei32_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei32_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_tu( @@ -364,7 +364,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei32_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_tu( @@ -373,7 +373,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei32_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_tu( @@ -382,7 +382,7 @@ vuint16m1_t test_vloxei32_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei32_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_tu( @@ -391,7 +391,7 @@ vuint16m2_t test_vloxei32_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei32_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return 
vloxei32_v_u16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_tu( @@ -400,7 +400,7 @@ vuint16m4_t test_vloxei32_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_tu( @@ -409,7 +409,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_tu( @@ -418,7 +418,7 @@ vuint32m1_t test_vloxei32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_tu( @@ -427,7 +427,7 @@ vuint32m2_t test_vloxei32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_tu( @@ -436,7 +436,7 @@ vuint32m4_t test_vloxei32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_tu( @@ -445,7 +445,7 @@ vuint32m8_t test_vloxei32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei32_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_tu( @@ -454,7 +454,7 @@ vuint64m1_t test_vloxei32_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei32_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_tu( @@ -463,7 +463,7 @@ vuint64m2_t test_vloxei32_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei32_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_tu( @@ -472,7 +472,7 @@ vuint64m4_t test_vloxei32_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei32_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m8_tu(maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_tum( @@ -481,7 +481,7 @@ vuint64m8_t test_vloxei32_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei32_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_tum( @@ -490,7 +490,7 @@ vfloat16mf4_t test_vloxei32_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei32_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_tum( @@ -499,7 +499,7 @@ vfloat16mf2_t test_vloxei32_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei32_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_tum( @@ -508,7 +508,7 @@ vfloat16m1_t test_vloxei32_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei32_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_tum( @@ -517,7 +517,7 @@ vfloat16m2_t test_vloxei32_v_f16m2_tum(vbool8_t mask, vfloat16m2_t 
maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei32_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_tum( @@ -526,7 +526,7 @@ vfloat16m4_t test_vloxei32_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_tum( @@ -535,7 +535,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_tum( @@ -544,7 +544,7 @@ vfloat32m1_t test_vloxei32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_tum( @@ -553,7 +553,7 @@ vfloat32m2_t test_vloxei32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, 
vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_tum( @@ -562,7 +562,7 @@ vfloat32m4_t test_vloxei32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_tum( @@ -571,7 +571,7 @@ vfloat32m8_t test_vloxei32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei32_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_tum( @@ -580,7 +580,7 @@ vfloat64m1_t test_vloxei32_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei32_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_tum( @@ -589,7 +589,7 @@ vfloat64m2_t test_vloxei32_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei32_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m4_tum(mask, maskedoff, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_tum( @@ -598,7 +598,7 @@ vfloat64m4_t test_vloxei32_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei32_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_tum( @@ -607,7 +607,7 @@ vfloat64m8_t test_vloxei32_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei32_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_tum( @@ -616,7 +616,7 @@ vint8mf8_t test_vloxei32_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei32_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_tum( @@ -625,7 +625,7 @@ vint8mf4_t test_vloxei32_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei32_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_tum( @@ -634,7 +634,7 @@ vint8mf2_t test_vloxei32_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei32_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_tum( @@ -643,7 +643,7 @@ vint8m1_t test_vloxei32_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei32_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i8m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_tum( @@ -652,7 +652,7 @@ vint8m2_t test_vloxei32_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei32_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_tum( @@ -661,7 +661,7 @@ vint16mf4_t test_vloxei32_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei32_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_tum( @@ -670,7 +670,7 @@ vint16mf2_t test_vloxei32_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei32_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return 
vloxei32_v_i16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_tum( @@ -679,7 +679,7 @@ vint16m1_t test_vloxei32_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei32_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_tum( @@ -688,7 +688,7 @@ vint16m2_t test_vloxei32_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei32_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_tum( @@ -697,7 +697,7 @@ vint16m4_t test_vloxei32_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_tum( @@ -706,7 +706,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei32_v_i32m2_tum( @@ -715,7 +715,7 @@ vint32m1_t test_vloxei32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_tum( @@ -724,7 +724,7 @@ vint32m2_t test_vloxei32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_tum( @@ -733,7 +733,7 @@ vint32m4_t test_vloxei32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_tum( @@ -742,7 +742,7 @@ vint32m8_t test_vloxei32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei32_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_tum( @@ -751,7 +751,7 @@ vint64m1_t test_vloxei32_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m2_t test_vloxei32_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_tum( @@ -760,7 +760,7 @@ vint64m2_t test_vloxei32_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei32_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_tum( @@ -769,7 +769,7 @@ vint64m4_t test_vloxei32_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei32_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_tum( @@ -778,7 +778,7 @@ vint64m8_t test_vloxei32_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei32_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_tum( @@ -787,7 +787,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei32_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u8mf4_tum(mask, 
maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_tum( @@ -796,7 +796,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei32_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_tum( @@ -805,7 +805,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei32_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_tum( @@ -814,7 +814,7 @@ vuint8m1_t test_vloxei32_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei32_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u8m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_tum( @@ -823,7 +823,7 @@ vuint8m2_t test_vloxei32_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei32_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_tum( @@ -832,7 
+832,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei32_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_tum( @@ -841,7 +841,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei32_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_tum( @@ -850,7 +850,7 @@ vuint16m1_t test_vloxei32_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei32_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_tum( @@ -859,7 +859,7 @@ vuint16m2_t test_vloxei32_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei32_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_tum( @@ -868,7 +868,7 @@ vuint16m4_t test_vloxei32_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vloxei32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_tum( @@ -877,7 +877,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_tum( @@ -886,7 +886,7 @@ vuint32m1_t test_vloxei32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_tum( @@ -895,7 +895,7 @@ vuint32m2_t test_vloxei32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_tum( @@ -904,7 +904,7 @@ vuint32m4_t test_vloxei32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u32m8_tum(mask, 
maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_tum( @@ -913,7 +913,7 @@ vuint32m8_t test_vloxei32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei32_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_tum( @@ -922,7 +922,7 @@ vuint64m1_t test_vloxei32_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei32_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_tum( @@ -931,7 +931,7 @@ vuint64m2_t test_vloxei32_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei32_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_tum( @@ -940,7 +940,7 @@ vuint64m4_t test_vloxei32_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei32_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_tumu( @@ 
-949,7 +949,7 @@ vuint64m8_t test_vloxei32_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei32_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_tumu( @@ -958,7 +958,7 @@ vfloat16mf4_t test_vloxei32_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei32_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_tumu( @@ -967,7 +967,7 @@ vfloat16mf2_t test_vloxei32_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei32_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_tumu( @@ -976,7 +976,7 @@ vfloat16m1_t test_vloxei32_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei32_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_tumu( @@ -985,7 +985,7 @@ vfloat16m2_t test_vloxei32_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, c // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat16m4_t test_vloxei32_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_tumu( @@ -994,7 +994,7 @@ vfloat16m4_t test_vloxei32_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_tumu( @@ -1003,7 +1003,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_tumu( @@ -1012,7 +1012,7 @@ vfloat32m1_t test_vloxei32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_tumu( @@ -1021,7 +1021,7 @@ vfloat32m2_t test_vloxei32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, 
size_t vl) { - return vloxei32_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_tumu( @@ -1030,7 +1030,7 @@ vfloat32m4_t test_vloxei32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_tumu( @@ -1039,7 +1039,7 @@ vfloat32m8_t test_vloxei32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei32_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_tumu( @@ -1048,7 +1048,7 @@ vfloat64m1_t test_vloxei32_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei32_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_tumu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vloxei32_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei32_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m4_tumu(mask, maskedoff, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_tumu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vloxei32_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei32_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_tumu( @@ -1075,7 +1075,7 @@ vfloat64m8_t test_vloxei32_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei32_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_tumu( @@ -1084,7 +1084,7 @@ vint8mf8_t test_vloxei32_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei32_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_tumu( @@ -1093,7 +1093,7 @@ vint8mf4_t test_vloxei32_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei32_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_tumu( @@ -1102,7 +1102,7 @@ vint8mf2_t test_vloxei32_v_i8mf2_tumu(vbool16_t mask, 
vint8mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei32_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_tumu( @@ -1111,7 +1111,7 @@ vint8m1_t test_vloxei32_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei32_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_tumu( @@ -1120,7 +1120,7 @@ vint8m2_t test_vloxei32_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei32_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_tumu( @@ -1129,7 +1129,7 @@ vint16mf4_t test_vloxei32_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei32_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_tumu( @@ -1138,7 +1138,7 @@ vint16mf2_t test_vloxei32_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei32_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const 
int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_tumu( @@ -1147,7 +1147,7 @@ vint16m1_t test_vloxei32_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei32_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_tumu( @@ -1156,7 +1156,7 @@ vint16m2_t test_vloxei32_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei32_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_tumu( @@ -1165,7 +1165,7 @@ vint16m4_t test_vloxei32_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_tumu( @@ -1174,7 +1174,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vloxei32_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_tumu( @@ -1183,7 +1183,7 @@ vint32m1_t test_vloxei32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_tumu( @@ -1192,7 +1192,7 @@ vint32m2_t test_vloxei32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_tumu( @@ -1201,7 +1201,7 @@ vint32m4_t test_vloxei32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_tumu( @@ -1210,7 +1210,7 @@ vint32m8_t test_vloxei32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei32_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_tumu( @@ -1219,7 +1219,7 @@ 
vint64m1_t test_vloxei32_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei32_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_tumu( @@ -1228,7 +1228,7 @@ vint64m2_t test_vloxei32_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei32_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_tumu( @@ -1237,7 +1237,7 @@ vint64m4_t test_vloxei32_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei32_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_tumu( @@ -1246,7 +1246,7 @@ vint64m8_t test_vloxei32_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei32_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_tumu( @@ -1255,7 +1255,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vloxei32_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_tumu( @@ -1264,7 +1264,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei32_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_tumu( @@ -1273,7 +1273,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei32_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_tumu( @@ -1282,7 +1282,7 @@ vuint8m1_t test_vloxei32_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei32_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_tumu( @@ -1291,7 +1291,7 @@ vuint8m2_t test_vloxei32_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei32_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return 
vloxei32_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_tumu( @@ -1300,7 +1300,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei32_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_tumu( @@ -1309,7 +1309,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei32_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_tumu( @@ -1318,7 +1318,7 @@ vuint16m1_t test_vloxei32_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei32_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_tumu( @@ -1327,7 +1327,7 @@ vuint16m2_t test_vloxei32_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei32_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16m4_tumu(mask, maskedoff, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_tumu( @@ -1336,7 +1336,7 @@ vuint16m4_t test_vloxei32_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_tumu( @@ -1345,7 +1345,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_tumu( @@ -1354,7 +1354,7 @@ vuint32m1_t test_vloxei32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_tumu( @@ -1363,7 +1363,7 @@ vuint32m2_t test_vloxei32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_tumu( @@ -1372,7 +1372,7 @@ vuint32m4_t test_vloxei32_v_u32m4_tumu(vbool8_t 
mask, vuint32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_tumu( @@ -1381,7 +1381,7 @@ vuint32m8_t test_vloxei32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei32_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_tumu( @@ -1390,7 +1390,7 @@ vuint64m1_t test_vloxei32_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei32_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_tumu( @@ -1399,7 +1399,7 @@ vuint64m2_t test_vloxei32_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei32_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_tumu( @@ -1408,7 +1408,7 @@ vuint64m4_t test_vloxei32_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei32_v_u64m8_tumu(vbool8_t mask, vuint64m8_t 
maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_mu( @@ -1417,7 +1417,7 @@ vuint64m8_t test_vloxei32_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei32_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_mu( @@ -1426,7 +1426,7 @@ vfloat16mf4_t test_vloxei32_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei32_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_mu( @@ -1435,7 +1435,7 @@ vfloat16mf2_t test_vloxei32_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei32_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_mu( @@ -1444,7 +1444,7 @@ vfloat16m1_t test_vloxei32_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei32_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f16m2_mu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vloxei32_v_f16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_mu( @@ -1453,7 +1453,7 @@ vfloat16m2_t test_vloxei32_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei32_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_mu( @@ -1462,7 +1462,7 @@ vfloat16m4_t test_vloxei32_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_mu( @@ -1471,7 +1471,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_mu( @@ -1480,7 +1480,7 @@ vfloat32m1_t test_vloxei32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_mu( @@ -1489,7 +1489,7 @@ vfloat32m2_t 
test_vloxei32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_mu( @@ -1498,7 +1498,7 @@ vfloat32m4_t test_vloxei32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_mu( @@ -1507,7 +1507,7 @@ vfloat32m8_t test_vloxei32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei32_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_mu( @@ -1516,7 +1516,7 @@ vfloat64m1_t test_vloxei32_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei32_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_mu( @@ -1525,7 +1525,7 @@ vfloat64m2_t test_vloxei32_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei32_v_f64m4_mu(vbool16_t mask, vfloat64m4_t 
maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_mu( @@ -1534,7 +1534,7 @@ vfloat64m4_t test_vloxei32_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei32_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_f64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_mu( @@ -1543,7 +1543,7 @@ vfloat64m8_t test_vloxei32_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei32_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_mu( @@ -1552,7 +1552,7 @@ vint8mf8_t test_vloxei32_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei32_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_mu( @@ -1561,7 +1561,7 @@ vint8mf4_t test_vloxei32_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei32_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8mf2_mu(mask, 
maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_mu( @@ -1570,7 +1570,7 @@ vint8mf2_t test_vloxei32_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei32_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_mu( @@ -1579,7 +1579,7 @@ vint8m1_t test_vloxei32_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei32_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i8m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i8m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_mu( @@ -1588,7 +1588,7 @@ vint8m2_t test_vloxei32_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei32_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_mu( @@ -1597,7 +1597,7 @@ vint16mf4_t test_vloxei32_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei32_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_mu( @@ -1606,7 +1606,7 @@ vint16mf2_t test_vloxei32_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, con 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei32_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_mu( @@ -1615,7 +1615,7 @@ vint16m1_t test_vloxei32_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei32_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_mu( @@ -1624,7 +1624,7 @@ vint16m2_t test_vloxei32_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei32_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_mu( @@ -1633,7 +1633,7 @@ vint16m4_t test_vloxei32_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_mu( @@ -1642,7 +1642,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return 
vloxei32_v_i32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_mu( @@ -1651,7 +1651,7 @@ vint32m1_t test_vloxei32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_mu( @@ -1660,7 +1660,7 @@ vint32m2_t test_vloxei32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_mu( @@ -1669,7 +1669,7 @@ vint32m4_t test_vloxei32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_i32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_mu( @@ -1678,7 +1678,7 @@ vint32m8_t test_vloxei32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei32_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_i64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_mu( @@ 
-1687,7 +1687,7 @@ vint64m1_t test_vloxei32_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei32_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_i64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_mu( @@ -1696,7 +1696,7 @@ vint64m2_t test_vloxei32_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei32_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_i64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_mu( @@ -1705,7 +1705,7 @@ vint64m4_t test_vloxei32_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei32_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_i64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_i64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_mu( @@ -1714,7 +1714,7 @@ vint64m8_t test_vloxei32_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei32_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_mu( @@ -1723,7 +1723,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vloxei32_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_mu( @@ -1732,7 +1732,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei32_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_mu( @@ -1741,7 +1741,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei32_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_mu( @@ -1750,7 +1750,7 @@ vuint8m1_t test_vloxei32_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei32_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u8m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u8m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_mu( @@ -1759,7 +1759,7 @@ vuint8m2_t test_vloxei32_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei32_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u16mf4_mu(mask, maskedoff, base, bindex, 
vl); + return __riscv_vloxei32_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_mu( @@ -1768,7 +1768,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei32_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_mu( @@ -1777,7 +1777,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei32_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_mu( @@ -1786,7 +1786,7 @@ vuint16m1_t test_vloxei32_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei32_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_mu( @@ -1795,7 +1795,7 @@ vuint16m2_t test_vloxei32_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei32_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_mu( @@ -1804,7 +1804,7 @@ vuint16m4_t 
test_vloxei32_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_mu( @@ -1813,7 +1813,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_mu( @@ -1822,7 +1822,7 @@ vuint32m1_t test_vloxei32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_mu( @@ -1831,7 +1831,7 @@ vuint32m2_t test_vloxei32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_mu( @@ -1840,7 +1840,7 @@ vuint32m4_t test_vloxei32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei32_v_u32m8_mu(vbool4_t mask, 
vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_u32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_mu( @@ -1849,7 +1849,7 @@ vuint32m8_t test_vloxei32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei32_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_u64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_mu( @@ -1858,7 +1858,7 @@ vuint64m1_t test_vloxei32_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei32_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_u64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_mu( @@ -1867,7 +1867,7 @@ vuint64m2_t test_vloxei32_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei32_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_u64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei32_v_u64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_mu( @@ -1876,6 +1876,6 @@ vuint64m4_t test_vloxei32_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei32_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_u64m8_mu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vloxei32_v_u64m8_mu(mask, maskedoff, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei64.c index b145468ed28f..8a5426398244 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei64_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei64_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei64_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei64_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei64_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei64_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei64_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f16m2_tu(maskedoff, base, bindex, 
vl); + return __riscv_vloxei64_v_f16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei64_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei64_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_tu( @@ -58,7 +58,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei64_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_tu( @@ -67,7 +67,7 @@ vfloat32m1_t test_vloxei64_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei64_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_tu( @@ -76,7 +76,7 @@ vfloat32m2_t test_vloxei64_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei64_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_tu( @@ -85,7 +85,7 @@ vfloat32m4_t test_vloxei64_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vloxei64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_tu( @@ -94,7 +94,7 @@ vfloat64m1_t test_vloxei64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_tu( @@ -103,7 +103,7 @@ vfloat64m2_t test_vloxei64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_tu( @@ -112,7 +112,7 @@ vfloat64m4_t test_vloxei64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_tu( @@ -121,7 +121,7 @@ vfloat64m8_t test_vloxei64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei64_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_tu( @@ -130,7 
+130,7 @@ vint8mf8_t test_vloxei64_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei64_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_tu( @@ -139,7 +139,7 @@ vint8mf4_t test_vloxei64_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei64_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_tu( @@ -148,7 +148,7 @@ vint8mf2_t test_vloxei64_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei64_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_tu( @@ -157,7 +157,7 @@ vint8m1_t test_vloxei64_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei64_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_tu( @@ -166,7 +166,7 @@ vint16mf4_t test_vloxei64_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei64_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i16mf2_tu(maskedoff, base, bindex, vl); 
+ return __riscv_vloxei64_v_i16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_tu( @@ -175,7 +175,7 @@ vint16mf2_t test_vloxei64_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei64_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_tu( @@ -184,7 +184,7 @@ vint16m1_t test_vloxei64_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei64_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_tu( @@ -193,7 +193,7 @@ vint16m2_t test_vloxei64_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei64_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_tu( @@ -202,7 +202,7 @@ vint32mf2_t test_vloxei64_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei64_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_tu( @@ -211,7 +211,7 @@ vint32m1_t test_vloxei64_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vloxei64_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_tu( @@ -220,7 +220,7 @@ vint32m2_t test_vloxei64_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei64_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_tu( @@ -229,7 +229,7 @@ vint32m4_t test_vloxei64_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_tu( @@ -238,7 +238,7 @@ vint64m1_t test_vloxei64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_tu( @@ -247,7 +247,7 @@ vint64m2_t test_vloxei64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_tu( @@ -256,7 +256,7 @@ 
vint64m4_t test_vloxei64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_tu( @@ -265,7 +265,7 @@ vint64m8_t test_vloxei64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei64_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_tu( @@ -274,7 +274,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei64_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_tu( @@ -283,7 +283,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei64_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_tu( @@ -292,7 +292,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei64_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u8m1_tu(maskedoff, base, bindex, vl); + return 
__riscv_vloxei64_v_u8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_tu( @@ -301,7 +301,7 @@ vuint8m1_t test_vloxei64_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei64_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_tu( @@ -310,7 +310,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei64_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_tu( @@ -319,7 +319,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei64_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_tu( @@ -328,7 +328,7 @@ vuint16m1_t test_vloxei64_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei64_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_tu( @@ -337,7 +337,7 @@ vuint16m2_t test_vloxei64_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vloxei64_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_tu( @@ -346,7 +346,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei64_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_tu( @@ -355,7 +355,7 @@ vuint32m1_t test_vloxei64_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei64_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_tu( @@ -364,7 +364,7 @@ vuint32m2_t test_vloxei64_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei64_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_tu( @@ -373,7 +373,7 @@ vuint32m4_t test_vloxei64_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_tu( @@ 
-382,7 +382,7 @@ vuint64m1_t test_vloxei64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_tu( @@ -391,7 +391,7 @@ vuint64m2_t test_vloxei64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_tu( @@ -400,7 +400,7 @@ vuint64m4_t test_vloxei64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_tum( @@ -409,7 +409,7 @@ vuint64m8_t test_vloxei64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei64_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_tum( @@ -418,7 +418,7 @@ vfloat16mf4_t test_vloxei64_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei64_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, 
vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_tum( @@ -427,7 +427,7 @@ vfloat16mf2_t test_vloxei64_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei64_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_tum( @@ -436,7 +436,7 @@ vfloat16m1_t test_vloxei64_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei64_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_tum( @@ -445,7 +445,7 @@ vfloat16m2_t test_vloxei64_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei64_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_tum( @@ -454,7 +454,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei64_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32m1_tum(mask, 
maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_tum( @@ -463,7 +463,7 @@ vfloat32m1_t test_vloxei64_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei64_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_tum( @@ -472,7 +472,7 @@ vfloat32m2_t test_vloxei64_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei64_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m4_t test_vloxei64_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vloxei64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_tum( @@ -499,7 +499,7 @@ vfloat64m2_t test_vloxei64_v_f64m2_tum(vbool32_t mask, 
vfloat64m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_tum( @@ -508,7 +508,7 @@ vfloat64m4_t test_vloxei64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_tum( @@ -517,7 +517,7 @@ vfloat64m8_t test_vloxei64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei64_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_tum( @@ -526,7 +526,7 @@ vint8mf8_t test_vloxei64_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei64_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_tum( @@ -535,7 +535,7 @@ vint8mf4_t test_vloxei64_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei64_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t 
bindex, size_t vl) { - return vloxei64_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_tum( @@ -544,7 +544,7 @@ vint8mf2_t test_vloxei64_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei64_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_tum( @@ -553,7 +553,7 @@ vint8m1_t test_vloxei64_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei64_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_tum( @@ -562,7 +562,7 @@ vint16mf4_t test_vloxei64_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei64_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_tum( @@ -571,7 +571,7 @@ vint16mf2_t test_vloxei64_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei64_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16m1_tum(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_tum( @@ -580,7 +580,7 @@ vint16m1_t test_vloxei64_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei64_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_tum( @@ -589,7 +589,7 @@ vint16m2_t test_vloxei64_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei64_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_tum( @@ -598,7 +598,7 @@ vint32mf2_t test_vloxei64_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei64_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_tum( @@ -607,7 +607,7 @@ vint32m1_t test_vloxei64_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei64_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_tum( @@ -616,7 +616,7 @@ vint32m2_t test_vloxei64_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m4_t test_vloxei64_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_tum( @@ -625,7 +625,7 @@ vint32m4_t test_vloxei64_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_tum( @@ -634,7 +634,7 @@ vint64m1_t test_vloxei64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_tum( @@ -643,7 +643,7 @@ vint64m2_t test_vloxei64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_tum( @@ -652,7 +652,7 @@ vint64m4_t test_vloxei64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return 
vloxei64_v_i64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_tum( @@ -661,7 +661,7 @@ vint64m8_t test_vloxei64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei64_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_tum( @@ -670,7 +670,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei64_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_tum( @@ -679,7 +679,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei64_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_tum( @@ -688,7 +688,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei64_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei64_v_u16mf4_tum( @@ -697,7 +697,7 @@ vuint8m1_t test_vloxei64_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei64_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_tum( @@ -706,7 +706,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei64_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_tum( @@ -715,7 +715,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei64_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_tum( @@ -724,7 +724,7 @@ vuint16m1_t test_vloxei64_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei64_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_tum( @@ -733,7 +733,7 @@ vuint16m2_t test_vloxei64_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, cons // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint32mf2_t test_vloxei64_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_tum( @@ -742,7 +742,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei64_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_tum( @@ -751,7 +751,7 @@ vuint32m1_t test_vloxei64_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei64_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_tum( @@ -760,7 +760,7 @@ vuint32m2_t test_vloxei64_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei64_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_tum( @@ -769,7 +769,7 @@ vuint32m4_t test_vloxei64_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return 
vloxei64_v_u64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_tum( @@ -778,7 +778,7 @@ vuint64m1_t test_vloxei64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_tum( @@ -787,7 +787,7 @@ vuint64m2_t test_vloxei64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_tum( @@ -796,7 +796,7 @@ vuint64m4_t test_vloxei64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_tumu( @@ -805,7 +805,7 @@ vuint64m8_t test_vloxei64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei64_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_tumu( @@ -814,7 +814,7 @@ vfloat16mf4_t test_vloxei64_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei64_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_tumu( @@ -823,7 +823,7 @@ vfloat16mf2_t test_vloxei64_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei64_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_tumu( @@ -832,7 +832,7 @@ vfloat16m1_t test_vloxei64_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei64_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_tumu( @@ -841,7 +841,7 @@ vfloat16m2_t test_vloxei64_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei64_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_tumu( @@ -850,7 +850,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2_tumu(vbool64_t mask, 
vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei64_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_tumu( @@ -859,7 +859,7 @@ vfloat32m1_t test_vloxei64_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei64_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_tumu( @@ -868,7 +868,7 @@ vfloat32m2_t test_vloxei64_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei64_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_tumu( @@ -877,7 +877,7 @@ vfloat32m4_t test_vloxei64_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_tumu( @@ -886,7 +886,7 @@ vfloat64m1_t test_vloxei64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const 
double *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_tumu( @@ -895,7 +895,7 @@ vfloat64m2_t test_vloxei64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_tumu( @@ -904,7 +904,7 @@ vfloat64m4_t test_vloxei64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_tumu( @@ -913,7 +913,7 @@ vfloat64m8_t test_vloxei64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei64_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_tumu( @@ -922,7 +922,7 @@ vint8mf8_t test_vloxei64_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei64_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vloxei64_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_tumu( @@ -931,7 +931,7 @@ vint8mf4_t test_vloxei64_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei64_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_tumu( @@ -940,7 +940,7 @@ vint8mf2_t test_vloxei64_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei64_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_tumu( @@ -949,7 +949,7 @@ vint8m1_t test_vloxei64_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei64_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_tumu( @@ -958,7 +958,7 @@ vint16mf4_t test_vloxei64_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei64_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_tumu( @@ -967,7 +967,7 @@ vint16mf2_t 
test_vloxei64_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei64_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_tumu( @@ -976,7 +976,7 @@ vint16m1_t test_vloxei64_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei64_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_tumu( @@ -985,7 +985,7 @@ vint16m2_t test_vloxei64_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei64_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_tumu( @@ -994,7 +994,7 @@ vint32mf2_t test_vloxei64_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei64_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_tumu( @@ -1003,7 +1003,7 @@ vint32m1_t test_vloxei64_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vloxei64_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_tumu( @@ -1012,7 +1012,7 @@ vint32m2_t test_vloxei64_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei64_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_tumu( @@ -1021,7 +1021,7 @@ vint32m4_t test_vloxei64_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_tumu( @@ -1030,7 +1030,7 @@ vint64m1_t test_vloxei64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_tumu( @@ -1039,7 +1039,7 @@ vint64m2_t test_vloxei64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return 
vloxei64_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_tumu( @@ -1048,7 +1048,7 @@ vint64m4_t test_vloxei64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_tumu( @@ -1057,7 +1057,7 @@ vint64m8_t test_vloxei64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei64_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_tumu( @@ -1066,7 +1066,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei64_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_tumu( @@ -1075,7 +1075,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei64_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_tumu( @@ -1084,7 +1084,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei64_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_tumu( @@ -1093,7 +1093,7 @@ vuint8m1_t test_vloxei64_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei64_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_tumu( @@ -1102,7 +1102,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei64_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_tumu( @@ -1111,7 +1111,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei64_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_tumu( @@ -1120,7 +1120,7 @@ vuint16m1_t test_vloxei64_v_u16m1_tumu(vbool16_t mask, 
vuint16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei64_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_tumu( @@ -1129,7 +1129,7 @@ vuint16m2_t test_vloxei64_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei64_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_tumu( @@ -1138,7 +1138,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei64_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_tumu( @@ -1147,7 +1147,7 @@ vuint32m1_t test_vloxei64_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei64_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_tumu( @@ -1156,7 +1156,7 @@ vuint32m2_t test_vloxei64_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei64_v_u32m4_tumu(vbool8_t mask, vuint32m4_t 
maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_tumu( @@ -1165,7 +1165,7 @@ vuint32m4_t test_vloxei64_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_tumu( @@ -1174,7 +1174,7 @@ vuint64m1_t test_vloxei64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_tumu( @@ -1183,7 +1183,7 @@ vuint64m2_t test_vloxei64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_tumu( @@ -1192,7 +1192,7 @@ vuint64m4_t test_vloxei64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); + 
return __riscv_vloxei64_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_mu( @@ -1201,7 +1201,7 @@ vuint64m8_t test_vloxei64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei64_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_mu( @@ -1210,7 +1210,7 @@ vfloat16mf4_t test_vloxei64_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei64_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_mu( @@ -1219,7 +1219,7 @@ vfloat16mf2_t test_vloxei64_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei64_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_mu( @@ -1228,7 +1228,7 @@ vfloat16m1_t test_vloxei64_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei64_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_mu( @@ -1237,7 +1237,7 @@ 
vfloat16m2_t test_vloxei64_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei64_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_mu( @@ -1246,7 +1246,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei64_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_mu( @@ -1255,7 +1255,7 @@ vfloat32m1_t test_vloxei64_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei64_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_mu( @@ -1264,7 +1264,7 @@ vfloat32m2_t test_vloxei64_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei64_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_mu( @@ -1273,7 +1273,7 @@ vfloat32m4_t test_vloxei64_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei64_v_f64m1_mu(vbool64_t 
mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_mu( @@ -1282,7 +1282,7 @@ vfloat64m1_t test_vloxei64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_mu( @@ -1291,7 +1291,7 @@ vfloat64m2_t test_vloxei64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_mu( @@ -1300,7 +1300,7 @@ vfloat64m4_t test_vloxei64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_f64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_mu( @@ -1309,7 +1309,7 @@ vfloat64m8_t test_vloxei64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei64_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vloxei64_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_mu( @@ -1318,7 +1318,7 @@ vint8mf8_t test_vloxei64_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei64_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_mu( @@ -1327,7 +1327,7 @@ vint8mf4_t test_vloxei64_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei64_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_mu( @@ -1336,7 +1336,7 @@ vint8mf2_t test_vloxei64_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei64_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_mu( @@ -1345,7 +1345,7 @@ vint8m1_t test_vloxei64_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei64_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_mu( @@ -1354,7 +1354,7 @@ vint16mf4_t test_vloxei64_v_i16mf4_mu(vbool64_t 
mask, vint16mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei64_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_mu( @@ -1363,7 +1363,7 @@ vint16mf2_t test_vloxei64_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei64_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_mu( @@ -1372,7 +1372,7 @@ vint16m1_t test_vloxei64_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei64_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_mu( @@ -1381,7 +1381,7 @@ vint16m2_t test_vloxei64_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei64_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_mu( @@ -1390,7 +1390,7 @@ vint32mf2_t test_vloxei64_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei64_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, 
vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_mu( @@ -1399,7 +1399,7 @@ vint32m1_t test_vloxei64_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei64_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_mu( @@ -1408,7 +1408,7 @@ vint32m2_t test_vloxei64_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei64_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_mu( @@ -1417,7 +1417,7 @@ vint32m4_t test_vloxei64_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_i64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_mu( @@ -1426,7 +1426,7 @@ vint64m1_t test_vloxei64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_i64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m2_mu(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_mu( @@ -1435,7 +1435,7 @@ vint64m2_t test_vloxei64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_i64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_mu( @@ -1444,7 +1444,7 @@ vint64m4_t test_vloxei64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_i64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_i64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_mu( @@ -1453,7 +1453,7 @@ vint64m8_t test_vloxei64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei64_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_mu( @@ -1462,7 +1462,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei64_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_mu( @@ -1471,7 +1471,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf2_t test_vloxei64_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_mu( @@ -1480,7 +1480,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei64_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_mu( @@ -1489,7 +1489,7 @@ vuint8m1_t test_vloxei64_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei64_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_mu( @@ -1498,7 +1498,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei64_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_mu( @@ -1507,7 +1507,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei64_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return 
vloxei64_v_u16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_mu( @@ -1516,7 +1516,7 @@ vuint16m1_t test_vloxei64_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei64_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_mu( @@ -1525,7 +1525,7 @@ vuint16m2_t test_vloxei64_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei64_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_mu( @@ -1534,7 +1534,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei64_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_mu( @@ -1543,7 +1543,7 @@ vuint32m1_t test_vloxei64_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei64_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei64_v_u32m4_mu( @@ -1552,7 +1552,7 @@ vuint32m2_t test_vloxei64_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei64_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_mu( @@ -1561,7 +1561,7 @@ vuint32m4_t test_vloxei64_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_u64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_mu( @@ -1570,7 +1570,7 @@ vuint64m1_t test_vloxei64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_u64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_mu( @@ -1579,7 +1579,7 @@ vuint64m2_t test_vloxei64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_u64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_mu( @@ -1588,6 +1588,6 @@ vuint64m4_t test_vloxei64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m8_t test_vloxei64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_u64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei64_v_u64m8_mu(mask, maskedoff, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei8.c index b905e2e5313c..bbc08548c927 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei8_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vloxei8_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 * // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei8_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vloxei8_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 * // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei8_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vloxei8_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *bas // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei8_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vloxei8_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *bas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei8_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vloxei8_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *bas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vloxei8_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_f16m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vloxei8_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *bas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei8_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *bas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei8_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei8_v_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vloxei8_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei8_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vloxei8_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei8_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vloxei8_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei8_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vloxei8_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei8_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vloxei8_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei8_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f64m2_tu(maskedoff, 
base, bindex, vl); + return __riscv_vloxei8_v_f64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vloxei8_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei8_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vloxei8_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei8_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_tu( @@ -148,7 +148,7 @@ vfloat64m8_t test_vloxei8_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_tu( @@ -157,7 +157,7 @@ vint8mf8_t test_vloxei8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_tu( @@ -166,7 +166,7 @@ vint8mf4_t test_vloxei8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vloxei8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_tu( @@ -175,7 +175,7 @@ vint8mf2_t test_vloxei8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_tu( @@ -184,7 +184,7 @@ vint8m1_t test_vloxei8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i8m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_tu( @@ -193,7 +193,7 @@ vint8m2_t test_vloxei8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vloxei8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_i8m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_tu( @@ -202,7 +202,7 @@ vint8m4_t test_vloxei8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vloxei8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8_v_i8m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_tu( @@ -211,7 +211,7 @@ vint8m8_t test_vloxei8_v_i8m8_tu(vint8m8_t maskedoff, 
const int8_t *base, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei8_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_tu( @@ -220,7 +220,7 @@ vint16mf4_t test_vloxei8_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei8_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_tu( @@ -229,7 +229,7 @@ vint16mf2_t test_vloxei8_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei8_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_tu( @@ -238,7 +238,7 @@ vint16m1_t test_vloxei8_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei8_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_tu( @@ -247,7 +247,7 @@ vint16m2_t test_vloxei8_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei8_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m4_tu(maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_tu( @@ -256,7 +256,7 @@ vint16m4_t test_vloxei8_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vloxei8_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_i16m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_tu( @@ -265,7 +265,7 @@ vint16m8_t test_vloxei8_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei8_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_tu( @@ -274,7 +274,7 @@ vint32mf2_t test_vloxei8_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei8_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_tu( @@ -283,7 +283,7 @@ vint32m1_t test_vloxei8_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei8_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_tu( @@ -292,7 +292,7 @@ vint32m2_t test_vloxei8_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei8_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return 
vloxei8_v_i32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_tu( @@ -301,7 +301,7 @@ vint32m4_t test_vloxei8_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei8_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_tu( @@ -310,7 +310,7 @@ vint32m8_t test_vloxei8_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei8_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_tu( @@ -319,7 +319,7 @@ vint64m1_t test_vloxei8_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei8_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_tu( @@ -328,7 +328,7 @@ vint64m2_t test_vloxei8_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei8_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_tu( @@ -337,7 +337,7 @@ vint64m4_t test_vloxei8_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vloxei8_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_tu( @@ -346,7 +346,7 @@ vint64m8_t test_vloxei8_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_tu( @@ -355,7 +355,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_tu( @@ -364,7 +364,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_tu( @@ -373,7 +373,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_tu( @@ -382,7 +382,7 @@ vuint8m1_t 
test_vloxei8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u8m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_tu( @@ -391,7 +391,7 @@ vuint8m2_t test_vloxei8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vloxei8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u8m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_tu( @@ -400,7 +400,7 @@ vuint8m4_t test_vloxei8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vloxei8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8_v_u8m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_tu( @@ -409,7 +409,7 @@ vuint8m8_t test_vloxei8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei8_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_tu( @@ -418,7 +418,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei8_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u16mf2_tu(maskedoff, base, bindex, vl); + return 
__riscv_vloxei8_v_u16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_tu( @@ -427,7 +427,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei8_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_tu( @@ -436,7 +436,7 @@ vuint16m1_t test_vloxei8_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei8_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_tu( @@ -445,7 +445,7 @@ vuint16m2_t test_vloxei8_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei8_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_tu( @@ -454,7 +454,7 @@ vuint16m4_t test_vloxei8_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vloxei8_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u16m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_tu( @@ -463,7 +463,7 @@ vuint16m8_t test_vloxei8_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei8_v_u32mf2_tu(vuint32mf2_t maskedoff, 
const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_tu( @@ -472,7 +472,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei8_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_tu( @@ -481,7 +481,7 @@ vuint32m1_t test_vloxei8_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei8_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_tu( @@ -490,7 +490,7 @@ vuint32m2_t test_vloxei8_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei8_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_tu( @@ -499,7 +499,7 @@ vuint32m4_t test_vloxei8_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei8_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_tu( @@ -508,7 +508,7 @@ vuint32m8_t test_vloxei8_v_u32m8_tu(vuint32m8_t 
maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei8_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_tu( @@ -517,7 +517,7 @@ vuint64m1_t test_vloxei8_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei8_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_tu( @@ -526,7 +526,7 @@ vuint64m2_t test_vloxei8_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei8_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_tu( @@ -535,7 +535,7 @@ vuint64m4_t test_vloxei8_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei8_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_tum( @@ -544,7 +544,7 @@ vuint64m8_t test_vloxei8_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei8_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); + return 
__riscv_vloxei8_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_tum( @@ -553,7 +553,7 @@ vfloat16mf4_t test_vloxei8_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei8_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_tum( @@ -562,7 +562,7 @@ vfloat16mf2_t test_vloxei8_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei8_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_tum( @@ -571,7 +571,7 @@ vfloat16m1_t test_vloxei8_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei8_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_tum( @@ -580,7 +580,7 @@ vfloat16m2_t test_vloxei8_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei8_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_tum( @@ -589,7 +589,7 @@ vfloat16m4_t 
test_vloxei8_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vloxei8_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_f16m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_tum( @@ -598,7 +598,7 @@ vfloat16m8_t test_vloxei8_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei8_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_tum( @@ -607,7 +607,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei8_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_tum( @@ -616,7 +616,7 @@ vfloat32m1_t test_vloxei8_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei8_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_tum( @@ -625,7 +625,7 @@ vfloat32m2_t test_vloxei8_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei8_v_f32m4_tum(vbool8_t mask, vfloat32m4_t 
maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_tum( @@ -634,7 +634,7 @@ vfloat32m4_t test_vloxei8_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei8_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_tum( @@ -643,7 +643,7 @@ vfloat32m8_t test_vloxei8_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei8_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_tum( @@ -652,7 +652,7 @@ vfloat64m1_t test_vloxei8_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei8_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_tum( @@ -661,7 +661,7 @@ vfloat64m2_t test_vloxei8_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei8_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m4_tum(mask, 
maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_tum( @@ -670,7 +670,7 @@ vfloat64m4_t test_vloxei8_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei8_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_tum( @@ -679,7 +679,7 @@ vfloat64m8_t test_vloxei8_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_tum( @@ -688,7 +688,7 @@ vint8mf8_t test_vloxei8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_tum( @@ -697,7 +697,7 @@ vint8mf4_t test_vloxei8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_tum( @@ -706,7 +706,7 @@ vint8mf2_t test_vloxei8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_tum( @@ -715,7 +715,7 @@ vint8m1_t test_vloxei8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i8m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_tum( @@ -724,7 +724,7 @@ vint8m2_t test_vloxei8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vloxei8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_i8m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_tum( @@ -733,7 +733,7 @@ vint8m4_t test_vloxei8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vloxei8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8_v_i8m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_tum( @@ -742,7 +742,7 @@ vint8m8_t test_vloxei8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei8_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i16mf4_tum(mask, maskedoff, base, 
bindex, vl); + return __riscv_vloxei8_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_tum( @@ -751,7 +751,7 @@ vint16mf4_t test_vloxei8_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei8_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_tum( @@ -760,7 +760,7 @@ vint16mf2_t test_vloxei8_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei8_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_tum( @@ -769,7 +769,7 @@ vint16m1_t test_vloxei8_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei8_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_tum( @@ -778,7 +778,7 @@ vint16m2_t test_vloxei8_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei8_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_tum( @@ -787,7 +787,7 @@ vint16m4_t 
test_vloxei8_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vloxei8_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_i16m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_tum( @@ -796,7 +796,7 @@ vint16m8_t test_vloxei8_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei8_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_tum( @@ -805,7 +805,7 @@ vint32mf2_t test_vloxei8_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei8_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_tum( @@ -814,7 +814,7 @@ vint32m1_t test_vloxei8_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei8_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_tum( @@ -823,7 +823,7 @@ vint32m2_t test_vloxei8_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei8_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const 
int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_tum( @@ -832,7 +832,7 @@ vint32m4_t test_vloxei8_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei8_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_tum( @@ -841,7 +841,7 @@ vint32m8_t test_vloxei8_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei8_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_tum( @@ -850,7 +850,7 @@ vint64m1_t test_vloxei8_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei8_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_tum( @@ -859,7 +859,7 @@ vint64m2_t test_vloxei8_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei8_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m4_tum(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_tum( @@ -868,7 +868,7 @@ vint64m4_t test_vloxei8_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei8_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_tum( @@ -877,7 +877,7 @@ vint64m8_t test_vloxei8_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_tum( @@ -886,7 +886,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_tum( @@ -895,7 +895,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_tum( @@ -904,7 +904,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m1_t test_vloxei8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_tum( @@ -913,7 +913,7 @@ vuint8m1_t test_vloxei8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u8m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_tum( @@ -922,7 +922,7 @@ vuint8m2_t test_vloxei8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vloxei8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u8m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_tum( @@ -931,7 +931,7 @@ vuint8m4_t test_vloxei8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vloxei8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8_v_u8m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_tum( @@ -940,7 +940,7 @@ vuint8m8_t test_vloxei8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); + return 
__riscv_vloxei8_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_tum( @@ -949,7 +949,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_tum( @@ -958,7 +958,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_tum( @@ -967,7 +967,7 @@ vuint16m1_t test_vloxei8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_tum( @@ -976,7 +976,7 @@ vuint16m2_t test_vloxei8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_tum( @@ -985,7 +985,7 @@ vuint16m4_t 
test_vloxei8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vloxei8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u16m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_tum( @@ -994,7 +994,7 @@ vuint16m8_t test_vloxei8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_tum( @@ -1003,7 +1003,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_tum( @@ -1012,7 +1012,7 @@ vuint32m1_t test_vloxei8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_tum( @@ -1021,7 +1021,7 @@ vuint32m2_t test_vloxei8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei8_v_u32m4_tum(vbool8_t mask, 
vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_tum( @@ -1030,7 +1030,7 @@ vuint32m4_t test_vloxei8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_tum( @@ -1039,7 +1039,7 @@ vuint32m8_t test_vloxei8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_tum( @@ -1048,7 +1048,7 @@ vuint64m1_t test_vloxei8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_tum( @@ -1057,7 +1057,7 @@ vuint64m2_t test_vloxei8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u64m4_tum(mask, maskedoff, base, bindex, vl); + return 
__riscv_vloxei8_v_u64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_tum( @@ -1066,7 +1066,7 @@ vuint64m4_t test_vloxei8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_tumu( @@ -1075,7 +1075,7 @@ vuint64m8_t test_vloxei8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vloxei8_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_tumu( @@ -1084,7 +1084,7 @@ vfloat16mf4_t test_vloxei8_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei8_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_tumu( @@ -1093,7 +1093,7 @@ vfloat16mf2_t test_vloxei8_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei8_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_tumu( @@ -1102,7 +1102,7 @@ 
vfloat16m1_t test_vloxei8_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei8_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_tumu( @@ -1111,7 +1111,7 @@ vfloat16m2_t test_vloxei8_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei8_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_tumu( @@ -1120,7 +1120,7 @@ vfloat16m4_t test_vloxei8_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vloxei8_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_f16m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_tumu( @@ -1129,7 +1129,7 @@ vfloat16m8_t test_vloxei8_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei8_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_tumu( @@ -1138,7 +1138,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vloxei8_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_tumu( @@ -1147,7 +1147,7 @@ vfloat32m1_t test_vloxei8_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei8_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_tumu( @@ -1156,7 +1156,7 @@ vfloat32m2_t test_vloxei8_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei8_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_tumu( @@ -1165,7 +1165,7 @@ vfloat32m4_t test_vloxei8_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei8_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_tumu( @@ -1174,7 +1174,7 @@ vfloat32m8_t test_vloxei8_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei8_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f64m1_tumu(mask, 
maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_tumu( @@ -1183,7 +1183,7 @@ vfloat64m1_t test_vloxei8_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei8_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_tumu( @@ -1192,7 +1192,7 @@ vfloat64m2_t test_vloxei8_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei8_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_tumu( @@ -1201,7 +1201,7 @@ vfloat64m4_t test_vloxei8_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei8_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_tumu( @@ -1210,7 +1210,7 @@ vfloat64m8_t test_vloxei8_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_tumu( @@ 
-1219,7 +1219,7 @@ vint8mf8_t test_vloxei8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_tumu( @@ -1228,7 +1228,7 @@ vint8mf4_t test_vloxei8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_tumu( @@ -1237,7 +1237,7 @@ vint8mf2_t test_vloxei8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_tumu( @@ -1246,7 +1246,7 @@ vint8m1_t test_vloxei8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_tumu( @@ -1255,7 +1255,7 @@ vint8m2_t test_vloxei8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vloxei8_v_i8m4_tumu(vbool2_t 
mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_i8m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_tumu( @@ -1264,7 +1264,7 @@ vint8m4_t test_vloxei8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vloxei8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8_v_i8m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_tumu( @@ -1273,7 +1273,7 @@ vint8m8_t test_vloxei8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei8_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_tumu( @@ -1282,7 +1282,7 @@ vint16mf4_t test_vloxei8_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei8_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_tumu( @@ -1291,7 +1291,7 @@ vint16mf2_t test_vloxei8_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei8_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vloxei8_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_tumu( @@ -1300,7 +1300,7 @@ vint16m1_t test_vloxei8_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei8_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_tumu( @@ -1309,7 +1309,7 @@ vint16m2_t test_vloxei8_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei8_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_tumu( @@ -1318,7 +1318,7 @@ vint16m4_t test_vloxei8_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vloxei8_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_i16m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_tumu( @@ -1327,7 +1327,7 @@ vint16m8_t test_vloxei8_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei8_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_tumu( @@ -1336,7 +1336,7 @@ vint32mf2_t 
test_vloxei8_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei8_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_tumu( @@ -1345,7 +1345,7 @@ vint32m1_t test_vloxei8_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei8_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_tumu( @@ -1354,7 +1354,7 @@ vint32m2_t test_vloxei8_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei8_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_tumu( @@ -1363,7 +1363,7 @@ vint32m4_t test_vloxei8_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei8_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_tumu( @@ -1372,7 +1372,7 @@ vint32m8_t test_vloxei8_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei8_v_i64m1_tumu(vbool64_t mask, vint64m1_t 
maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_tumu( @@ -1381,7 +1381,7 @@ vint64m1_t test_vloxei8_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei8_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_tumu( @@ -1390,7 +1390,7 @@ vint64m2_t test_vloxei8_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei8_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_tumu( @@ -1399,7 +1399,7 @@ vint64m4_t test_vloxei8_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei8_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_tumu( @@ -1408,7 +1408,7 @@ vint64m8_t test_vloxei8_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vloxei8_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_tumu( @@ -1417,7 +1417,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_tumu( @@ -1426,7 +1426,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_tumu( @@ -1435,7 +1435,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_tumu( @@ -1444,7 +1444,7 @@ vuint8m1_t test_vloxei8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_tumu( @@ -1453,7 +1453,7 @@ vuint8m2_t 
test_vloxei8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vloxei8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u8m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_tumu( @@ -1462,7 +1462,7 @@ vuint8m4_t test_vloxei8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vloxei8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8_v_u8m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_tumu( @@ -1471,7 +1471,7 @@ vuint8m8_t test_vloxei8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_tumu( @@ -1480,7 +1480,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_tumu( @@ -1489,7 +1489,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei8_v_u16m1_tumu(vbool16_t 
mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_tumu( @@ -1498,7 +1498,7 @@ vuint16m1_t test_vloxei8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_tumu( @@ -1507,7 +1507,7 @@ vuint16m2_t test_vloxei8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_tumu( @@ -1516,7 +1516,7 @@ vuint16m4_t test_vloxei8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vloxei8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u16m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_tumu( @@ -1525,7 +1525,7 @@ vuint16m8_t test_vloxei8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); + 
return __riscv_vloxei8_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_tumu( @@ -1534,7 +1534,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_tumu( @@ -1543,7 +1543,7 @@ vuint32m1_t test_vloxei8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_tumu( @@ -1552,7 +1552,7 @@ vuint32m2_t test_vloxei8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vloxei8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_tumu( @@ -1561,7 +1561,7 @@ vuint32m4_t test_vloxei8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_tumu( @@ -1570,7 +1570,7 @@ 
vuint32m8_t test_vloxei8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_tumu( @@ -1579,7 +1579,7 @@ vuint64m1_t test_vloxei8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_tumu( @@ -1588,7 +1588,7 @@ vuint64m2_t test_vloxei8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_tumu( @@ -1597,7 +1597,7 @@ vuint64m4_t test_vloxei8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_mu( @@ -1606,7 +1606,7 @@ vuint64m8_t test_vloxei8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t 
test_vloxei8_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_mu( @@ -1615,7 +1615,7 @@ vfloat16mf4_t test_vloxei8_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vloxei8_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_mu( @@ -1624,7 +1624,7 @@ vfloat16mf2_t test_vloxei8_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vloxei8_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_mu( @@ -1633,7 +1633,7 @@ vfloat16m1_t test_vloxei8_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vloxei8_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_mu( @@ -1642,7 +1642,7 @@ vfloat16m2_t test_vloxei8_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vloxei8_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f16m4_mu(mask, maskedoff, base, 
bindex, vl); + return __riscv_vloxei8_v_f16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_mu( @@ -1651,7 +1651,7 @@ vfloat16m4_t test_vloxei8_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vloxei8_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_f16m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f16m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_mu( @@ -1660,7 +1660,7 @@ vfloat16m8_t test_vloxei8_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vloxei8_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_mu( @@ -1669,7 +1669,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vloxei8_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_mu( @@ -1678,7 +1678,7 @@ vfloat32m1_t test_vloxei8_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vloxei8_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_mu( @@ -1687,7 +1687,7 @@ vfloat32m2_t 
test_vloxei8_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vloxei8_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_mu( @@ -1696,7 +1696,7 @@ vfloat32m4_t test_vloxei8_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vloxei8_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_mu( @@ -1705,7 +1705,7 @@ vfloat32m8_t test_vloxei8_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vloxei8_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_mu( @@ -1714,7 +1714,7 @@ vfloat64m1_t test_vloxei8_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vloxei8_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_mu( @@ -1723,7 +1723,7 @@ vfloat64m2_t test_vloxei8_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vloxei8_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const 
double *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_mu( @@ -1732,7 +1732,7 @@ vfloat64m4_t test_vloxei8_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vloxei8_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_f64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_mu( @@ -1741,7 +1741,7 @@ vfloat64m8_t test_vloxei8_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vloxei8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_mu( @@ -1750,7 +1750,7 @@ vint8mf8_t test_vloxei8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vloxei8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_mu( @@ -1759,7 +1759,7 @@ vint8mf4_t test_vloxei8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vloxei8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_mu( @@ -1768,7 +1768,7 @@ vint8mf2_t test_vloxei8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vloxei8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_mu( @@ -1777,7 +1777,7 @@ vint8m1_t test_vloxei8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vloxei8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i8m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_mu( @@ -1786,7 +1786,7 @@ vint8m2_t test_vloxei8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vloxei8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_i8m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_mu( @@ -1795,7 +1795,7 @@ vint8m4_t test_vloxei8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vloxei8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8_v_i8m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i8m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_mu( @@ -1804,7 +1804,7 @@ vint8m8_t test_vloxei8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vloxei8_v_i16mf4_mu(vbool64_t 
mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_mu( @@ -1813,7 +1813,7 @@ vint16mf4_t test_vloxei8_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vloxei8_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_mu( @@ -1822,7 +1822,7 @@ vint16mf2_t test_vloxei8_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vloxei8_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_mu( @@ -1831,7 +1831,7 @@ vint16m1_t test_vloxei8_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vloxei8_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_mu( @@ -1840,7 +1840,7 @@ vint16m2_t test_vloxei8_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vloxei8_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m4_mu(mask, 
maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_mu( @@ -1849,7 +1849,7 @@ vint16m4_t test_vloxei8_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vloxei8_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_i16m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i16m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_mu( @@ -1858,7 +1858,7 @@ vint16m8_t test_vloxei8_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vloxei8_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_mu( @@ -1867,7 +1867,7 @@ vint32mf2_t test_vloxei8_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vloxei8_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_mu( @@ -1876,7 +1876,7 @@ vint32m1_t test_vloxei8_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vloxei8_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_mu( @@ -1885,7 +1885,7 @@ vint32m2_t test_vloxei8_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const i // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vloxei8_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_mu( @@ -1894,7 +1894,7 @@ vint32m4_t test_vloxei8_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vloxei8_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_i32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_mu( @@ -1903,7 +1903,7 @@ vint32m8_t test_vloxei8_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vloxei8_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_i64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_mu( @@ -1912,7 +1912,7 @@ vint64m1_t test_vloxei8_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vloxei8_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_i64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_mu( @@ -1921,7 +1921,7 @@ vint64m2_t test_vloxei8_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vloxei8_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_i64m4_mu(mask, 
maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_mu( @@ -1930,7 +1930,7 @@ vint64m4_t test_vloxei8_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vloxei8_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_i64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_i64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_mu( @@ -1939,7 +1939,7 @@ vint64m8_t test_vloxei8_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vloxei8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_mu( @@ -1948,7 +1948,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vloxei8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_mu( @@ -1957,7 +1957,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vloxei8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_mu( @@ -1966,7 +1966,7 @@ vuint8mf2_t 
test_vloxei8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vloxei8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_mu( @@ -1975,7 +1975,7 @@ vuint8m1_t test_vloxei8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vloxei8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u8m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_mu( @@ -1984,7 +1984,7 @@ vuint8m2_t test_vloxei8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vloxei8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u8m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_mu( @@ -1993,7 +1993,7 @@ vuint8m4_t test_vloxei8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vloxei8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8_v_u8m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u8m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_mu( @@ -2002,7 +2002,7 @@ vuint8m8_t test_vloxei8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vloxei8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t 
bindex, size_t vl) { - return vloxei8_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_mu( @@ -2011,7 +2011,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vloxei8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_mu( @@ -2020,7 +2020,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vloxei8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_mu( @@ -2029,7 +2029,7 @@ vuint16m1_t test_vloxei8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vloxei8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_mu( @@ -2038,7 +2038,7 @@ vuint16m2_t test_vloxei8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vloxei8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei8_v_u16m8_mu( @@ -2047,7 +2047,7 @@ vuint16m4_t test_vloxei8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vloxei8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_u16m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u16m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_mu( @@ -2056,7 +2056,7 @@ vuint16m8_t test_vloxei8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vloxei8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_mu( @@ -2065,7 +2065,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vloxei8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_mu( @@ -2074,7 +2074,7 @@ vuint32m1_t test_vloxei8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vloxei8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_mu( @@ -2083,7 +2083,7 @@ vuint32m2_t test_vloxei8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vloxei8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_mu( @@ -2092,7 +2092,7 @@ vuint32m4_t test_vloxei8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vloxei8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_u32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_mu( @@ -2101,7 +2101,7 @@ vuint32m8_t test_vloxei8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vloxei8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_u64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_mu( @@ -2110,7 +2110,7 @@ vuint64m1_t test_vloxei8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vloxei8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_u64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_mu( @@ -2119,7 +2119,7 @@ vuint64m2_t test_vloxei8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vloxei8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_u64m4_mu(mask, maskedoff, base, bindex, vl); + 
return __riscv_vloxei8_v_u64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_mu( @@ -2128,6 +2128,6 @@ vuint64m4_t test_vloxei8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vloxei8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_u64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vloxei8_v_u64m8_mu(mask, maskedoff, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei16.c index 852214f52c81..66bb2ca6959e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei16.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_tu( @@ -30,7 +30,7 @@ void test_vloxseg2ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_tu( @@ -43,7 +43,7 @@ void 
test_vloxseg2ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_tu( @@ -56,7 +56,7 @@ void test_vloxseg2ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_tu( @@ -69,7 +69,7 @@ void test_vloxseg2ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_tu( @@ -82,7 +82,7 @@ void test_vloxseg2ei16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei16_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_tu( @@ -95,7 +95,7 @@ void test_vloxseg2ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_tu( @@ -108,7 +108,7 @@ void test_vloxseg2ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_tu( @@ -121,7 +121,7 @@ void test_vloxseg2ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_tu( @@ -134,7 +134,7 @@ void test_vloxseg2ei16_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, 
const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_tu( @@ -147,7 +147,7 @@ void test_vloxseg2ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_tu( @@ -160,7 +160,7 @@ void test_vloxseg2ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_tu( @@ -173,7 +173,7 @@ void test_vloxseg2ei16_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_tu( @@ -186,7 +186,7 @@ void test_vloxseg2ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mas // 
CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_tu( @@ -199,7 +199,7 @@ void test_vloxseg2ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_tu( @@ -212,7 +212,7 @@ void test_vloxseg2ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_tu( @@ -225,7 +225,7 @@ void test_vloxseg2ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedo // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_tu( @@ 
-238,7 +238,7 @@ void test_vloxseg2ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedo // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_tu( @@ -251,7 +251,7 @@ void test_vloxseg2ei16_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedo // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_tu( @@ -264,7 +264,7 @@ void test_vloxseg2ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_tu( @@ -277,7 +277,7 @@ void test_vloxseg2ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei16_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_tu( @@ -290,7 +290,7 @@ void test_vloxseg2ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_tu( @@ -303,7 +303,7 @@ void test_vloxseg2ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_tu( @@ -316,7 +316,7 @@ void test_vloxseg2ei16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_tu( @@ -329,7 +329,7 @@ void test_vloxseg2ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, 
vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_tu( @@ -342,7 +342,7 @@ void test_vloxseg2ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_tu( @@ -355,7 +355,7 @@ void test_vloxseg2ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_tu( @@ -368,7 +368,7 @@ void test_vloxseg2ei16_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_tu( @@ -381,7 +381,7 @@ void test_vloxseg2ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mas // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_tu( @@ -394,7 +394,7 @@ void test_vloxseg2ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_tu( @@ -407,7 +407,7 @@ void test_vloxseg2ei16_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_tu( @@ -420,7 +420,7 @@ void test_vloxseg2ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_tu( @@ -433,7 
+433,7 @@ void test_vloxseg2ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_tu( @@ -446,7 +446,7 @@ void test_vloxseg2ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_tu( @@ -459,7 +459,7 @@ void test_vloxseg2ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_tu( @@ -472,7 +472,7 @@ void test_vloxseg2ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m4_tu(v0, v1, 
maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_tu( @@ -485,7 +485,7 @@ void test_vloxseg2ei16_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_tu( @@ -498,7 +498,7 @@ void test_vloxseg2ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_tu( @@ -511,7 +511,7 @@ void test_vloxseg2ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_tu( @@ -524,7 +524,7 @@ void test_vloxseg2ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t 
bindex, size_t vl) { - return vloxseg2ei16_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_tu( @@ -537,7 +537,7 @@ void test_vloxseg2ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_tu( @@ -550,7 +550,7 @@ void test_vloxseg2ei16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_tu( @@ -563,7 +563,7 @@ void test_vloxseg2ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_tu( @@ -576,7 +576,7 @@ void test_vloxseg2ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_tu( @@ -589,7 +589,7 @@ void test_vloxseg2ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_tu( @@ -602,7 +602,7 @@ void test_vloxseg2ei16_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_tu( @@ -615,7 +615,7 @@ void test_vloxseg2ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_tu( @@ 
-628,7 +628,7 @@ void test_vloxseg2ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_tum( @@ -641,7 +641,7 @@ void test_vloxseg2ei16_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_tum( @@ -654,7 +654,7 @@ void test_vloxseg2ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_tum( @@ -667,7 +667,7 @@ void test_vloxseg2ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, 
size_t vl) { - return vloxseg2ei16_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_tum( @@ -680,7 +680,7 @@ void test_vloxseg2ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_tum( @@ -693,7 +693,7 @@ void test_vloxseg2ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_tum( @@ -706,7 +706,7 @@ void test_vloxseg2ei16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_tum( @@ -719,7 +719,7 @@ void 
test_vloxseg2ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_tum( @@ -732,7 +732,7 @@ void test_vloxseg2ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_tum( @@ -745,7 +745,7 @@ void test_vloxseg2ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_tum( @@ -758,7 +758,7 @@ void test_vloxseg2ei16_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { 
- return vloxseg2ei16_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_tum( @@ -771,7 +771,7 @@ void test_vloxseg2ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_tum( @@ -784,7 +784,7 @@ void test_vloxseg2ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_tum( @@ -797,7 +797,7 @@ void test_vloxseg2ei16_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_tum( @@ -810,7 +810,7 @@ void 
test_vloxseg2ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_tum( @@ -823,7 +823,7 @@ void test_vloxseg2ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_tum( @@ -836,7 +836,7 @@ void test_vloxseg2ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_tum( @@ -849,7 +849,7 @@ void test_vloxseg2ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m2_tum(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_tum( @@ -862,7 +862,7 @@ void test_vloxseg2ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_tum( @@ -875,7 +875,7 @@ void test_vloxseg2ei16_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_tum( @@ -888,7 +888,7 @@ void test_vloxseg2ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_tum( @@ -901,7 +901,7 @@ void test_vloxseg2ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // 
CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_tum( @@ -914,7 +914,7 @@ void test_vloxseg2ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_tum( @@ -927,7 +927,7 @@ void test_vloxseg2ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_tum( @@ -940,7 +940,7 @@ void test_vloxseg2ei16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + 
return __riscv_vloxseg2ei16_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_tum( @@ -953,7 +953,7 @@ void test_vloxseg2ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_tum( @@ -966,7 +966,7 @@ void test_vloxseg2ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_tum( @@ -979,7 +979,7 @@ void test_vloxseg2ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_tum( @@ -992,7 +992,7 @@ void test_vloxseg2ei16_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_tum( @@ -1005,7 +1005,7 @@ void test_vloxseg2ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_tum( @@ -1018,7 +1018,7 @@ void test_vloxseg2ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_tum( @@ -1031,7 +1031,7 @@ void test_vloxseg2ei16_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei16_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_tum( @@ -1044,7 +1044,7 @@ void test_vloxseg2ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_tum( @@ -1057,7 +1057,7 @@ void test_vloxseg2ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_tum( @@ -1070,7 +1070,7 @@ void test_vloxseg2ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_tum( @@ -1083,7 +1083,7 @@ void test_vloxseg2ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_tum( @@ -1096,7 +1096,7 @@ void test_vloxseg2ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_tum( @@ -1109,7 +1109,7 @@ void test_vloxseg2ei16_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_tum( @@ -1122,7 +1122,7 @@ void test_vloxseg2ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei16_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_tum( @@ -1135,7 +1135,7 @@ void test_vloxseg2ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_tum( @@ -1148,7 +1148,7 @@ void test_vloxseg2ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_tum( @@ -1161,7 +1161,7 @@ void test_vloxseg2ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_tum( @@ -1174,7 +1174,7 @@ void test_vloxseg2ei16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_tum( @@ -1187,7 +1187,7 @@ void test_vloxseg2ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_tum( @@ -1200,7 +1200,7 @@ void test_vloxseg2ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_tum( @@ -1213,7 +1213,7 @@ void test_vloxseg2ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei16_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_tum( @@ -1226,7 +1226,7 @@ void test_vloxseg2ei16_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_tum( @@ -1239,7 +1239,7 @@ void test_vloxseg2ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_tum( @@ -1252,7 +1252,7 @@ void test_vloxseg2ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_tumu( @@ -1265,7 +1265,7 @@ void test_vloxseg2ei16_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_tumu( @@ -1278,7 +1278,7 @@ void test_vloxseg2ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_tumu( @@ -1291,7 +1291,7 @@ void test_vloxseg2ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_tumu( @@ -1304,7 +1304,7 @@ void test_vloxseg2ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m2_tumu(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_tumu( @@ -1317,7 +1317,7 @@ void test_vloxseg2ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_tumu( @@ -1330,7 +1330,7 @@ void test_vloxseg2ei16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_tumu( @@ -1343,7 +1343,7 @@ void test_vloxseg2ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_tumu( @@ -1356,7 +1356,7 @@ void test_vloxseg2ei16_v_f32m1_tumu(vfloat32m1_t *v0, 
vfloat32m1_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_tumu( @@ -1369,7 +1369,7 @@ void test_vloxseg2ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_tumu( @@ -1382,7 +1382,7 @@ void test_vloxseg2ei16_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_tumu( @@ -1395,7 +1395,7 @@ void test_vloxseg2ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return 
vloxseg2ei16_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_tumu( @@ -1408,7 +1408,7 @@ void test_vloxseg2ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_tumu( @@ -1421,7 +1421,7 @@ void test_vloxseg2ei16_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_tumu( @@ -1434,7 +1434,7 @@ void test_vloxseg2ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_tumu( @@ -1447,7 +1447,7 @@ void 
test_vloxseg2ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_tumu( @@ -1460,7 +1460,7 @@ void test_vloxseg2ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_tumu( @@ -1473,7 +1473,7 @@ void test_vloxseg2ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_tumu( @@ -1486,7 +1486,7 @@ void test_vloxseg2ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m4_tumu(v0, 
v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_tumu( @@ -1499,7 +1499,7 @@ void test_vloxseg2ei16_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_tumu( @@ -1512,7 +1512,7 @@ void test_vloxseg2ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_tumu( @@ -1525,7 +1525,7 @@ void test_vloxseg2ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_tumu( @@ -1538,7 +1538,7 @@ void test_vloxseg2ei16_v_i16m1_tumu(vint16m1_t *v0, 
vint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_tumu( @@ -1551,7 +1551,7 @@ void test_vloxseg2ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_tumu( @@ -1564,7 +1564,7 @@ void test_vloxseg2ei16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_tumu( @@ -1577,7 +1577,7 @@ void test_vloxseg2ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m1_tumu(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_tumu( @@ -1590,7 +1590,7 @@ void test_vloxseg2ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_tumu( @@ -1603,7 +1603,7 @@ void test_vloxseg2ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_tumu( @@ -1616,7 +1616,7 @@ void test_vloxseg2ei16_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_tumu( @@ -1629,7 +1629,7 @@ void test_vloxseg2ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, 
vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_tumu( @@ -1642,7 +1642,7 @@ void test_vloxseg2ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_tumu( @@ -1655,7 +1655,7 @@ void test_vloxseg2ei16_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_tumu( @@ -1668,7 +1668,7 @@ void test_vloxseg2ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf4_tumu(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_tumu( @@ -1681,7 +1681,7 @@ void test_vloxseg2ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_tumu( @@ -1694,7 +1694,7 @@ void test_vloxseg2ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_tumu( @@ -1707,7 +1707,7 @@ void test_vloxseg2ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_tumu( @@ -1720,7 +1720,7 @@ void test_vloxseg2ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask // 
CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_tumu( @@ -1733,7 +1733,7 @@ void test_vloxseg2ei16_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_tumu( @@ -1746,7 +1746,7 @@ void test_vloxseg2ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_tumu( @@ -1759,7 +1759,7 @@ void test_vloxseg2ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m1_tumu(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_tumu( @@ -1772,7 +1772,7 @@ void test_vloxseg2ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_tumu( @@ -1785,7 +1785,7 @@ void test_vloxseg2ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_tumu( @@ -1798,7 +1798,7 @@ void test_vloxseg2ei16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_tumu( @@ -1811,7 +1811,7 @@ void test_vloxseg2ei16_v_u32mf2_tumu(vuint32mf2_t *v0, 
vuint32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_tumu( @@ -1824,7 +1824,7 @@ void test_vloxseg2ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_tumu( @@ -1837,7 +1837,7 @@ void test_vloxseg2ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_tumu( @@ -1850,7 +1850,7 @@ void test_vloxseg2ei16_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg2ei16_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_tumu( @@ -1863,7 +1863,7 @@ void test_vloxseg2ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_tumu( @@ -1876,7 +1876,7 @@ void test_vloxseg2ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_mu( @@ -1889,7 +1889,7 @@ void test_vloxseg2ei16_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_mu( @@ -1902,7 +1902,7 @@ void 
test_vloxseg2ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_mu( @@ -1915,7 +1915,7 @@ void test_vloxseg2ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_mu( @@ -1928,7 +1928,7 @@ void test_vloxseg2ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_mu( @@ -1941,7 +1941,7 @@ void test_vloxseg2ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, 
size_t vl) { - return vloxseg2ei16_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_mu( @@ -1954,7 +1954,7 @@ void test_vloxseg2ei16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_mu( @@ -1967,7 +1967,7 @@ void test_vloxseg2ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_mu( @@ -1980,7 +1980,7 @@ void test_vloxseg2ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_mu( @@ -1993,7 +1993,7 @@ void 
test_vloxseg2ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_mu( @@ -2006,7 +2006,7 @@ void test_vloxseg2ei16_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_mu( @@ -2019,7 +2019,7 @@ void test_vloxseg2ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_mu( @@ -2032,7 +2032,7 @@ void test_vloxseg2ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - 
return vloxseg2ei16_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_mu( @@ -2045,7 +2045,7 @@ void test_vloxseg2ei16_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_mu( @@ -2058,7 +2058,7 @@ void test_vloxseg2ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_mu( @@ -2071,7 +2071,7 @@ void test_vloxseg2ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_mu( @@ -2084,7 +2084,7 @@ void test_vloxseg2ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t 
*v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_mu( @@ -2097,7 +2097,7 @@ void test_vloxseg2ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_mu( @@ -2110,7 +2110,7 @@ void test_vloxseg2ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_mu( @@ -2123,7 +2123,7 @@ void test_vloxseg2ei16_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei16_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_mu( @@ -2136,7 +2136,7 @@ void test_vloxseg2ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_mu( @@ -2149,7 +2149,7 @@ void test_vloxseg2ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_mu( @@ -2162,7 +2162,7 @@ void test_vloxseg2ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_mu( @@ -2175,7 +2175,7 @@ void test_vloxseg2ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_mu( @@ -2188,7 +2188,7 @@ void test_vloxseg2ei16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_mu( @@ -2201,7 +2201,7 @@ void test_vloxseg2ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_mu( @@ -2214,7 +2214,7 @@ void test_vloxseg2ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m2_mu(v0, 
v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_mu( @@ -2227,7 +2227,7 @@ void test_vloxseg2ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_mu( @@ -2240,7 +2240,7 @@ void test_vloxseg2ei16_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_mu( @@ -2253,7 +2253,7 @@ void test_vloxseg2ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_mu( @@ -2266,7 +2266,7 @@ void test_vloxseg2ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t 
mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_mu( @@ -2279,7 +2279,7 @@ void test_vloxseg2ei16_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_mu( @@ -2292,7 +2292,7 @@ void test_vloxseg2ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_mu( @@ -2305,7 +2305,7 @@ void test_vloxseg2ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_mu( @@ -2318,7 +2318,7 @@ void test_vloxseg2ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_mu( @@ -2331,7 +2331,7 @@ void test_vloxseg2ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_mu( @@ -2344,7 +2344,7 @@ void test_vloxseg2ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_mu( @@ -2357,7 +2357,7 @@ void test_vloxseg2ei16_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t 
*base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_mu( @@ -2370,7 +2370,7 @@ void test_vloxseg2ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_mu( @@ -2383,7 +2383,7 @@ void test_vloxseg2ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_mu( @@ -2396,7 +2396,7 @@ void test_vloxseg2ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_mu( @@ -2409,7 
+2409,7 @@ void test_vloxseg2ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_mu( @@ -2422,7 +2422,7 @@ void test_vloxseg2ei16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_mu( @@ -2435,7 +2435,7 @@ void test_vloxseg2ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_mu( @@ -2448,7 +2448,7 @@ void test_vloxseg2ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, 
size_t vl) { - return vloxseg2ei16_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_mu( @@ -2461,7 +2461,7 @@ void test_vloxseg2ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_mu( @@ -2474,7 +2474,7 @@ void test_vloxseg2ei16_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_mu( @@ -2487,7 +2487,7 @@ void test_vloxseg2ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_mu( @@ -2500,6 +2500,6 @@ void 
test_vloxseg2ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei16_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei16_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei32.c index 3cc3876df2d5..1dc22cb473ba 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei32.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_tu( @@ -30,7 +30,7 @@ void test_vloxseg2ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_tu( @@ -43,7 +43,7 @@ void test_vloxseg2ei32_v_f16mf2_tu(vfloat16mf2_t *v0, 
vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_tu( @@ -56,7 +56,7 @@ void test_vloxseg2ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_tu( @@ -69,7 +69,7 @@ void test_vloxseg2ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_tu( @@ -82,7 +82,7 @@ void test_vloxseg2ei32_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32mf2_tu(v0, v1, maskedoff0, 
maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_tu( @@ -95,7 +95,7 @@ void test_vloxseg2ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_tu( @@ -108,7 +108,7 @@ void test_vloxseg2ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_tu( @@ -121,7 +121,7 @@ void test_vloxseg2ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_tu( @@ -134,7 +134,7 @@ void test_vloxseg2ei32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - 
return vloxseg2ei32_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_tu( @@ -147,7 +147,7 @@ void test_vloxseg2ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_tu( @@ -160,7 +160,7 @@ void test_vloxseg2ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_tu( @@ -173,7 +173,7 @@ void test_vloxseg2ei32_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_tu( @@ -186,7 +186,7 @@ void test_vloxseg2ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mas // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_tu( @@ -199,7 +199,7 @@ void test_vloxseg2ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_tu( @@ -212,7 +212,7 @@ void test_vloxseg2ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_tu( @@ -225,7 +225,7 @@ void test_vloxseg2ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedo // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_tu( @@ -238,7 +238,7 @@ void 
test_vloxseg2ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedo // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_tu( @@ -251,7 +251,7 @@ void test_vloxseg2ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_tu( @@ -264,7 +264,7 @@ void test_vloxseg2ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_tu( @@ -277,7 +277,7 @@ void test_vloxseg2ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei32_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_tu( @@ -290,7 +290,7 @@ void test_vloxseg2ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tu( @@ -303,7 +303,7 @@ void test_vloxseg2ei32_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_tu( @@ -316,7 +316,7 @@ void test_vloxseg2ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_tu( @@ -329,7 +329,7 @@ void test_vloxseg2ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, 
vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_tu( @@ -342,7 +342,7 @@ void test_vloxseg2ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_tu( @@ -355,7 +355,7 @@ void test_vloxseg2ei32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_tu( @@ -368,7 +368,7 @@ void test_vloxseg2ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_tu( @@ -381,7 +381,7 @@ void test_vloxseg2ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mas // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei32_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_tu( @@ -394,7 +394,7 @@ void test_vloxseg2ei32_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_tu( @@ -407,7 +407,7 @@ void test_vloxseg2ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_tu( @@ -420,7 +420,7 @@ void test_vloxseg2ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_tu( @@ -433,7 
+433,7 @@ void test_vloxseg2ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_tu( @@ -446,7 +446,7 @@ void test_vloxseg2ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_tu( @@ -459,7 +459,7 @@ void test_vloxseg2ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_tu( @@ -472,7 +472,7 @@ void test_vloxseg2ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei32_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_tu( @@ -485,7 +485,7 @@ void test_vloxseg2ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_tu( @@ -498,7 +498,7 @@ void test_vloxseg2ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_tu( @@ -511,7 +511,7 @@ void test_vloxseg2ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_tu( @@ -524,7 +524,7 @@ void test_vloxseg2ei32_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const 
uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_tu( @@ -537,7 +537,7 @@ void test_vloxseg2ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_tu( @@ -550,7 +550,7 @@ void test_vloxseg2ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_tu( @@ -563,7 +563,7 @@ void test_vloxseg2ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_tu( @@ -576,7 +576,7 @@ void test_vloxseg2ei32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t // 
CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_tu( @@ -589,7 +589,7 @@ void test_vloxseg2ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_tu( @@ -602,7 +602,7 @@ void test_vloxseg2ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_tum( @@ -615,7 +615,7 @@ void test_vloxseg2ei32_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16mf4_tum(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_tum( @@ -628,7 +628,7 @@ void test_vloxseg2ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_tum( @@ -641,7 +641,7 @@ void test_vloxseg2ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_tum( @@ -654,7 +654,7 @@ void test_vloxseg2ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_tum( @@ -667,7 +667,7 @@ void test_vloxseg2ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m4_tum(vfloat16m4_t *v0, 
vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_tum( @@ -680,7 +680,7 @@ void test_vloxseg2ei32_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_tum( @@ -693,7 +693,7 @@ void test_vloxseg2ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_tum( @@ -706,7 +706,7 @@ void test_vloxseg2ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m2_tum(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_tum( @@ -719,7 +719,7 @@ void test_vloxseg2ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_tum( @@ -732,7 +732,7 @@ void test_vloxseg2ei32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_tum( @@ -745,7 +745,7 @@ void test_vloxseg2ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_tum( @@ -758,7 +758,7 @@ void test_vloxseg2ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t 
*v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_tum( @@ -771,7 +771,7 @@ void test_vloxseg2ei32_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_tum( @@ -784,7 +784,7 @@ void test_vloxseg2ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_tum( @@ -797,7 +797,7 @@ void test_vloxseg2ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_tum( @@ -810,7 +810,7 @@ void test_vloxseg2ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_tum( @@ -823,7 +823,7 @@ void test_vloxseg2ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_tum( @@ -836,7 +836,7 @@ void test_vloxseg2ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_tum( @@ -849,7 +849,7 @@ void test_vloxseg2ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const 
int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_tum( @@ -862,7 +862,7 @@ void test_vloxseg2ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_tum( @@ -875,7 +875,7 @@ void test_vloxseg2ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_tum( @@ -888,7 +888,7 @@ void test_vloxseg2ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tum( @@ -901,7 +901,7 @@ 
void test_vloxseg2ei32_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_tum( @@ -914,7 +914,7 @@ void test_vloxseg2ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_tum( @@ -927,7 +927,7 @@ void test_vloxseg2ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_tum( @@ -940,7 +940,7 @@ void test_vloxseg2ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return 
vloxseg2ei32_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_tum( @@ -953,7 +953,7 @@ void test_vloxseg2ei32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_tum( @@ -966,7 +966,7 @@ void test_vloxseg2ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_tum( @@ -979,7 +979,7 @@ void test_vloxseg2ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_tum( @@ -992,7 +992,7 @@ void test_vloxseg2ei32_v_i64m4_tum(vint64m4_t *v0, 
vint64m4_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_tum( @@ -1005,7 +1005,7 @@ void test_vloxseg2ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_tum( @@ -1018,7 +1018,7 @@ void test_vloxseg2ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_tum( @@ -1031,7 +1031,7 @@ void test_vloxseg2ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m1_tum(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_tum( @@ -1044,7 +1044,7 @@ void test_vloxseg2ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_tum( @@ -1057,7 +1057,7 @@ void test_vloxseg2ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_tum( @@ -1070,7 +1070,7 @@ void test_vloxseg2ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_tum( @@ -1083,7 +1083,7 @@ void test_vloxseg2ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t 
*v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_tum( @@ -1096,7 +1096,7 @@ void test_vloxseg2ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_tum( @@ -1109,7 +1109,7 @@ void test_vloxseg2ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_tum( @@ -1122,7 +1122,7 @@ void test_vloxseg2ei32_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32mf2_tum(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_tum( @@ -1135,7 +1135,7 @@ void test_vloxseg2ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_tum( @@ -1148,7 +1148,7 @@ void test_vloxseg2ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_tum( @@ -1161,7 +1161,7 @@ void test_vloxseg2ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_tum( @@ -1174,7 +1174,7 @@ void test_vloxseg2ei32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, 
vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_tum( @@ -1187,7 +1187,7 @@ void test_vloxseg2ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_tum( @@ -1200,7 +1200,7 @@ void test_vloxseg2ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_tumu( @@ -1213,7 +1213,7 @@ void test_vloxseg2ei32_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf4_tumu(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_tumu( @@ -1226,7 +1226,7 @@ void test_vloxseg2ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_tumu( @@ -1239,7 +1239,7 @@ void test_vloxseg2ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_tumu( @@ -1252,7 +1252,7 @@ void test_vloxseg2ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_tumu( @@ -1265,7 +1265,7 @@ void 
test_vloxseg2ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_tumu( @@ -1278,7 +1278,7 @@ void test_vloxseg2ei32_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_tumu( @@ -1291,7 +1291,7 @@ void test_vloxseg2ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_tumu( @@ -1304,7 +1304,7 @@ void test_vloxseg2ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, 
vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_tumu( @@ -1317,7 +1317,7 @@ void test_vloxseg2ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_tumu( @@ -1330,7 +1330,7 @@ void test_vloxseg2ei32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_tumu( @@ -1343,7 +1343,7 @@ void test_vloxseg2ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei32_v_f64m4_tumu( @@ -1356,7 +1356,7 @@ void test_vloxseg2ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_tumu( @@ -1369,7 +1369,7 @@ void test_vloxseg2ei32_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_tumu( @@ -1382,7 +1382,7 @@ void test_vloxseg2ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_tumu( @@ -1395,7 +1395,7 @@ void test_vloxseg2ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t 
maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_tumu( @@ -1408,7 +1408,7 @@ void test_vloxseg2ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_tumu( @@ -1421,7 +1421,7 @@ void test_vloxseg2ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_tumu( @@ -1434,7 +1434,7 @@ void test_vloxseg2ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei32_v_i16mf2_tumu( @@ -1447,7 +1447,7 @@ void test_vloxseg2ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_tumu( @@ -1460,7 +1460,7 @@ void test_vloxseg2ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_tumu( @@ -1473,7 +1473,7 @@ void test_vloxseg2ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_tumu( @@ -1486,7 +1486,7 @@ void test_vloxseg2ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t 
maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tumu( @@ -1499,7 +1499,7 @@ void test_vloxseg2ei32_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_tumu( @@ -1512,7 +1512,7 @@ void test_vloxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_tumu( @@ -1525,7 +1525,7 @@ void test_vloxseg2ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei32_v_i32m4_tumu( @@ -1538,7 +1538,7 @@ void test_vloxseg2ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_tumu( @@ -1551,7 +1551,7 @@ void test_vloxseg2ei32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_tumu( @@ -1564,7 +1564,7 @@ void test_vloxseg2ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_tumu( @@ -1577,7 +1577,7 @@ void test_vloxseg2ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, 
const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_tumu( @@ -1590,7 +1590,7 @@ void test_vloxseg2ei32_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_tumu( @@ -1603,7 +1603,7 @@ void test_vloxseg2ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_tumu( @@ -1616,7 +1616,7 @@ void test_vloxseg2ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei32_v_u8m1_tumu( @@ -1629,7 +1629,7 @@ void test_vloxseg2ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_tumu( @@ -1642,7 +1642,7 @@ void test_vloxseg2ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_tumu( @@ -1655,7 +1655,7 @@ void test_vloxseg2ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_tumu( @@ -1668,7 +1668,7 @@ void test_vloxseg2ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t 
maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_tumu( @@ -1681,7 +1681,7 @@ void test_vloxseg2ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_tumu( @@ -1694,7 +1694,7 @@ void test_vloxseg2ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_tumu( @@ -1707,7 +1707,7 @@ void test_vloxseg2ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei32_v_u32mf2_tumu( @@ -1720,7 +1720,7 @@ void test_vloxseg2ei32_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_tumu( @@ -1733,7 +1733,7 @@ void test_vloxseg2ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_tumu( @@ -1746,7 +1746,7 @@ void test_vloxseg2ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_tumu( @@ -1759,7 +1759,7 @@ void test_vloxseg2ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, 
vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_tumu( @@ -1772,7 +1772,7 @@ void test_vloxseg2ei32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_tumu( @@ -1785,7 +1785,7 @@ void test_vloxseg2ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_tumu( @@ -1798,7 +1798,7 @@ void test_vloxseg2ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_mu( @@ -1811,7 +1811,7 @@ void test_vloxseg2ei32_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_mu( @@ -1824,7 +1824,7 @@ void test_vloxseg2ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_mu( @@ -1837,7 +1837,7 @@ void test_vloxseg2ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_mu( @@ -1850,7 +1850,7 @@ void test_vloxseg2ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, 
vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_mu( @@ -1863,7 +1863,7 @@ void test_vloxseg2ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_mu( @@ -1876,7 +1876,7 @@ void test_vloxseg2ei32_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_mu( @@ -1889,7 +1889,7 @@ void test_vloxseg2ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_mu( @@ -1902,7 +1902,7 @@ void test_vloxseg2ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_mu( @@ -1915,7 +1915,7 @@ void test_vloxseg2ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_mu( @@ -1928,7 +1928,7 @@ void test_vloxseg2ei32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_mu( @@ -1941,7 +1941,7 @@ void test_vloxseg2ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, 
vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_mu( @@ -1954,7 +1954,7 @@ void test_vloxseg2ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_mu( @@ -1967,7 +1967,7 @@ void test_vloxseg2ei32_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_mu( @@ -1980,7 +1980,7 @@ void test_vloxseg2ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei32_v_i8mf2_mu( @@ -1993,7 +1993,7 @@ void test_vloxseg2ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_mu( @@ -2006,7 +2006,7 @@ void test_vloxseg2ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_mu( @@ -2019,7 +2019,7 @@ void test_vloxseg2ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_mu( @@ -2032,7 +2032,7 @@ void test_vloxseg2ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, 
size_t vl) { - return vloxseg2ei32_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_mu( @@ -2045,7 +2045,7 @@ void test_vloxseg2ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_mu( @@ -2058,7 +2058,7 @@ void test_vloxseg2ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_mu( @@ -2071,7 +2071,7 @@ void test_vloxseg2ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_mu( @@ -2084,7 +2084,7 @@ void 
test_vloxseg2ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_mu( @@ -2097,7 +2097,7 @@ void test_vloxseg2ei32_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_mu( @@ -2110,7 +2110,7 @@ void test_vloxseg2ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_mu( @@ -2123,7 +2123,7 @@ void test_vloxseg2ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return 
vloxseg2ei32_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_mu( @@ -2136,7 +2136,7 @@ void test_vloxseg2ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_mu( @@ -2149,7 +2149,7 @@ void test_vloxseg2ei32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_mu( @@ -2162,7 +2162,7 @@ void test_vloxseg2ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_mu( @@ -2175,7 +2175,7 @@ void test_vloxseg2ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, 
vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_mu( @@ -2188,7 +2188,7 @@ void test_vloxseg2ei32_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_mu( @@ -2201,7 +2201,7 @@ void test_vloxseg2ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_mu( @@ -2214,7 +2214,7 @@ void test_vloxseg2ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); + return __riscv_vloxseg2ei32_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_mu( @@ -2227,7 +2227,7 @@ void test_vloxseg2ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_mu( @@ -2240,7 +2240,7 @@ void test_vloxseg2ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_mu( @@ -2253,7 +2253,7 @@ void test_vloxseg2ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_mu( @@ -2266,7 +2266,7 @@ void test_vloxseg2ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_mu( @@ -2279,7 +2279,7 @@ void test_vloxseg2ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_mu( @@ -2292,7 +2292,7 @@ void test_vloxseg2ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_mu( @@ -2305,7 +2305,7 @@ void test_vloxseg2ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei32_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_mu( @@ -2318,7 +2318,7 @@ void test_vloxseg2ei32_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_mu( @@ -2331,7 +2331,7 @@ void test_vloxseg2ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_mu( @@ -2344,7 +2344,7 @@ void test_vloxseg2ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_mu( @@ -2357,7 +2357,7 @@ void test_vloxseg2ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_mu( @@ -2370,7 +2370,7 @@ void test_vloxseg2ei32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_mu( @@ -2383,7 +2383,7 @@ void test_vloxseg2ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei32_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_mu( @@ -2396,6 +2396,6 @@ void test_vloxseg2ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei32_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei32_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei64.c index 7eb993e0efdd..a0f521ba5eae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei64.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_tu( @@ -30,7 +30,7 @@ void test_vloxseg2ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_tu( @@ -43,7 +43,7 @@ void test_vloxseg2ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_tu( @@ -56,7 +56,7 @@ void test_vloxseg2ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_tu( @@ -69,7 +69,7 @@ void test_vloxseg2ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_tu( @@ -82,7 +82,7 @@ void test_vloxseg2ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_tu( @@ -95,7 +95,7 @@ void test_vloxseg2ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return 
vloxseg2ei64_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_tu( @@ -108,7 +108,7 @@ void test_vloxseg2ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_tu( @@ -121,7 +121,7 @@ void test_vloxseg2ei64_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_tu( @@ -134,7 +134,7 @@ void test_vloxseg2ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_tu( @@ -147,7 +147,7 @@ void test_vloxseg2ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_tu( @@ -160,7 +160,7 @@ void test_vloxseg2ei64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_tu( @@ -173,7 +173,7 @@ void test_vloxseg2ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_tu( @@ -186,7 +186,7 @@ void test_vloxseg2ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_tu( @@ -199,7 +199,7 
@@ void test_vloxseg2ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_tu( @@ -212,7 +212,7 @@ void test_vloxseg2ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedo // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_tu( @@ -225,7 +225,7 @@ void test_vloxseg2ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_tu( @@ -238,7 +238,7 @@ void test_vloxseg2ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei64_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_tu( @@ -251,7 +251,7 @@ void test_vloxseg2ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_tu( @@ -264,7 +264,7 @@ void test_vloxseg2ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_tu( @@ -277,7 +277,7 @@ void test_vloxseg2ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_tu( @@ -290,7 +290,7 @@ void test_vloxseg2ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, 
vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_tu( @@ -303,7 +303,7 @@ void test_vloxseg2ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_tu( @@ -316,7 +316,7 @@ void test_vloxseg2ei64_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_tu( @@ -329,7 +329,7 @@ void test_vloxseg2ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_tu( @@ -342,7 +342,7 @@ void test_vloxseg2ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mas // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_tu( @@ -355,7 +355,7 @@ void test_vloxseg2ei64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_tu( @@ -368,7 +368,7 @@ void test_vloxseg2ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_tu( @@ -381,7 +381,7 @@ void test_vloxseg2ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_tu( @@ -394,7 
+394,7 @@ void test_vloxseg2ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_tu( @@ -407,7 +407,7 @@ void test_vloxseg2ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_tu( @@ -420,7 +420,7 @@ void test_vloxseg2ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_tu( @@ -433,7 +433,7 @@ void test_vloxseg2ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei64_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_tu( @@ -446,7 +446,7 @@ void test_vloxseg2ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_tu( @@ -459,7 +459,7 @@ void test_vloxseg2ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_tu( @@ -472,7 +472,7 @@ void test_vloxseg2ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_tu( @@ -485,7 +485,7 @@ void test_vloxseg2ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const 
uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_tu( @@ -498,7 +498,7 @@ void test_vloxseg2ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_tu( @@ -511,7 +511,7 @@ void test_vloxseg2ei64_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_tu( @@ -524,7 +524,7 @@ void test_vloxseg2ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_tu( @@ -537,7 +537,7 @@ void test_vloxseg2ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: 
ret void // void test_vloxseg2ei64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_tum( @@ -550,7 +550,7 @@ void test_vloxseg2ei64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_tum( @@ -563,7 +563,7 @@ void test_vloxseg2ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_tum( @@ -576,7 +576,7 @@ void test_vloxseg2ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei64_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_tum( @@ -589,7 +589,7 @@ void test_vloxseg2ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_tum( @@ -602,7 +602,7 @@ void test_vloxseg2ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_tum( @@ -615,7 +615,7 @@ void test_vloxseg2ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_tum( @@ -628,7 +628,7 @@ void test_vloxseg2ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_tum( @@ -641,7 +641,7 @@ void test_vloxseg2ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_tum( @@ -654,7 +654,7 @@ void test_vloxseg2ei64_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_tum( @@ -667,7 +667,7 @@ void test_vloxseg2ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei64_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_tum( @@ -680,7 +680,7 @@ void test_vloxseg2ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_tum( @@ -693,7 +693,7 @@ void test_vloxseg2ei64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_tum( @@ -706,7 +706,7 @@ void test_vloxseg2ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_tum( @@ -719,7 +719,7 @@ void test_vloxseg2ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_tum( @@ -732,7 +732,7 @@ void test_vloxseg2ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_tum( @@ -745,7 +745,7 @@ void test_vloxseg2ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_tum( @@ -758,7 +758,7 @@ void test_vloxseg2ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei64_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_tum( @@ -771,7 +771,7 @@ void test_vloxseg2ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_tum( @@ -784,7 +784,7 @@ void test_vloxseg2ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_tum( @@ -797,7 +797,7 @@ void test_vloxseg2ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_tum( @@ -810,7 +810,7 @@ void test_vloxseg2ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_tum( @@ -823,7 +823,7 @@ void test_vloxseg2ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_tum( @@ -836,7 +836,7 @@ void test_vloxseg2ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_tum( @@ -849,7 +849,7 @@ void test_vloxseg2ei64_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m1_tum(v0, 
v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_tum( @@ -862,7 +862,7 @@ void test_vloxseg2ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_tum( @@ -875,7 +875,7 @@ void test_vloxseg2ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_tum( @@ -888,7 +888,7 @@ void test_vloxseg2ei64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_tum( @@ -901,7 +901,7 @@ void test_vloxseg2ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, 
vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_tum( @@ -914,7 +914,7 @@ void test_vloxseg2ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_tum( @@ -927,7 +927,7 @@ void test_vloxseg2ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_tum( @@ -940,7 +940,7 @@ void test_vloxseg2ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_tum( @@ -953,7 +953,7 @@ void test_vloxseg2ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_tum( @@ -966,7 +966,7 @@ void test_vloxseg2ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_tum( @@ -979,7 +979,7 @@ void test_vloxseg2ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_tum( @@ -992,7 +992,7 @@ void test_vloxseg2ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t 
maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_tum( @@ -1005,7 +1005,7 @@ void test_vloxseg2ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_tum( @@ -1018,7 +1018,7 @@ void test_vloxseg2ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_tum( @@ -1031,7 +1031,7 @@ void test_vloxseg2ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_tum( @@ -1044,7 +1044,7 @@ void test_vloxseg2ei64_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_tum( @@ -1057,7 +1057,7 @@ void test_vloxseg2ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_tum( @@ -1070,7 +1070,7 @@ void test_vloxseg2ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_tumu( @@ -1083,7 +1083,7 @@ void test_vloxseg2ei64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t 
maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_tumu( @@ -1096,7 +1096,7 @@ void test_vloxseg2ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_tumu( @@ -1109,7 +1109,7 @@ void test_vloxseg2ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_tumu( @@ -1122,7 +1122,7 @@ void test_vloxseg2ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16m2_tumu(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_tumu( @@ -1135,7 +1135,7 @@ void test_vloxseg2ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_tumu( @@ -1148,7 +1148,7 @@ void test_vloxseg2ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_tumu( @@ -1161,7 +1161,7 @@ void test_vloxseg2ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_tumu( @@ -1174,7 +1174,7 @@ void test_vloxseg2ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m4_tumu(vfloat32m4_t 
*v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_tumu( @@ -1187,7 +1187,7 @@ void test_vloxseg2ei64_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_tumu( @@ -1200,7 +1200,7 @@ void test_vloxseg2ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_tumu( @@ -1213,7 +1213,7 @@ void test_vloxseg2ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei64_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_tumu( @@ -1226,7 +1226,7 @@ void test_vloxseg2ei64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_tumu( @@ -1239,7 +1239,7 @@ void test_vloxseg2ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_tumu( @@ -1252,7 +1252,7 @@ void test_vloxseg2ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_tumu( @@ -1265,7 +1265,7 @@ void test_vloxseg2ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_tumu( @@ -1278,7 +1278,7 @@ void test_vloxseg2ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_tumu( @@ -1291,7 +1291,7 @@ void test_vloxseg2ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_tumu( @@ -1304,7 +1304,7 @@ void test_vloxseg2ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei64_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_tumu( @@ -1317,7 +1317,7 @@ void test_vloxseg2ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_tumu( @@ -1330,7 +1330,7 @@ void test_vloxseg2ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_tumu( @@ -1343,7 +1343,7 @@ void test_vloxseg2ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_tumu( @@ -1356,7 +1356,7 @@ void test_vloxseg2ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_tumu( @@ -1369,7 +1369,7 @@ void test_vloxseg2ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_tumu( @@ -1382,7 +1382,7 @@ void test_vloxseg2ei64_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_tumu( @@ -1395,7 +1395,7 @@ void test_vloxseg2ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei64_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_tumu( @@ -1408,7 +1408,7 @@ void test_vloxseg2ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_tumu( @@ -1421,7 +1421,7 @@ void test_vloxseg2ei64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_tumu( @@ -1434,7 +1434,7 @@ void test_vloxseg2ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_tumu( @@ -1447,7 +1447,7 @@ void test_vloxseg2ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_tumu( @@ -1460,7 +1460,7 @@ void test_vloxseg2ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_tumu( @@ -1473,7 +1473,7 @@ void test_vloxseg2ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_tumu( @@ -1486,7 +1486,7 @@ void test_vloxseg2ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + 
return __riscv_vloxseg2ei64_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_tumu( @@ -1499,7 +1499,7 @@ void test_vloxseg2ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_tumu( @@ -1512,7 +1512,7 @@ void test_vloxseg2ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_tumu( @@ -1525,7 +1525,7 @@ void test_vloxseg2ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_tumu( @@ -1538,7 +1538,7 @@ void test_vloxseg2ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: 
ret void // void test_vloxseg2ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_tumu( @@ -1551,7 +1551,7 @@ void test_vloxseg2ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_tumu( @@ -1564,7 +1564,7 @@ void test_vloxseg2ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_tumu( @@ -1577,7 +1577,7 @@ void test_vloxseg2ei64_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); + return __riscv_vloxseg2ei64_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_tumu( @@ -1590,7 +1590,7 @@ void test_vloxseg2ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_tumu( @@ -1603,7 +1603,7 @@ void test_vloxseg2ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_mu( @@ -1616,7 +1616,7 @@ void test_vloxseg2ei64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_mu( @@ -1629,7 +1629,7 @@ void test_vloxseg2ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_mu( @@ -1642,7 +1642,7 @@ void test_vloxseg2ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_mu( @@ -1655,7 +1655,7 @@ void test_vloxseg2ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_mu( @@ -1668,7 +1668,7 @@ void test_vloxseg2ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f32mf2_mu(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_mu( @@ -1681,7 +1681,7 @@ void test_vloxseg2ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_mu( @@ -1694,7 +1694,7 @@ void test_vloxseg2ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_mu( @@ -1707,7 +1707,7 @@ void test_vloxseg2ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_mu( @@ -1720,7 +1720,7 @@ void test_vloxseg2ei64_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: 
ret void // void test_vloxseg2ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_mu( @@ -1733,7 +1733,7 @@ void test_vloxseg2ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_mu( @@ -1746,7 +1746,7 @@ void test_vloxseg2ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_mu( @@ -1759,7 +1759,7 @@ void test_vloxseg2ei64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei64_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_mu( @@ -1772,7 +1772,7 @@ void test_vloxseg2ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_mu( @@ -1785,7 +1785,7 @@ void test_vloxseg2ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_mu( @@ -1798,7 +1798,7 @@ void test_vloxseg2ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_mu( @@ -1811,7 +1811,7 @@ void test_vloxseg2ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16mf4_mu(vint16mf4_t *v0, 
vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_mu( @@ -1824,7 +1824,7 @@ void test_vloxseg2ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_mu( @@ -1837,7 +1837,7 @@ void test_vloxseg2ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_mu( @@ -1850,7 +1850,7 @@ void test_vloxseg2ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_mu( @@ -1863,7 +1863,7 @@ void test_vloxseg2ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_mu( @@ -1876,7 +1876,7 @@ void test_vloxseg2ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_mu( @@ -1889,7 +1889,7 @@ void test_vloxseg2ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_mu( @@ -1902,7 +1902,7 @@ void test_vloxseg2ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, 
vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_mu( @@ -1915,7 +1915,7 @@ void test_vloxseg2ei64_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_mu( @@ -1928,7 +1928,7 @@ void test_vloxseg2ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_mu( @@ -1941,7 +1941,7 @@ void test_vloxseg2ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_mu( @@ 
-1954,7 +1954,7 @@ void test_vloxseg2ei64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_mu( @@ -1967,7 +1967,7 @@ void test_vloxseg2ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_mu( @@ -1980,7 +1980,7 @@ void test_vloxseg2ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_mu( @@ -1993,7 +1993,7 @@ void test_vloxseg2ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { 
- return vloxseg2ei64_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_mu( @@ -2006,7 +2006,7 @@ void test_vloxseg2ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_mu( @@ -2019,7 +2019,7 @@ void test_vloxseg2ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_mu( @@ -2032,7 +2032,7 @@ void test_vloxseg2ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_mu( @@ -2045,7 +2045,7 @@ void 
test_vloxseg2ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_mu( @@ -2058,7 +2058,7 @@ void test_vloxseg2ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_mu( @@ -2071,7 +2071,7 @@ void test_vloxseg2ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_mu( @@ -2084,7 +2084,7 @@ void test_vloxseg2ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - 
return vloxseg2ei64_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_mu( @@ -2097,7 +2097,7 @@ void test_vloxseg2ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_mu( @@ -2110,7 +2110,7 @@ void test_vloxseg2ei64_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_mu( @@ -2123,7 +2123,7 @@ void test_vloxseg2ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_mu( @@ -2136,6 +2136,6 @@ void test_vloxseg2ei64_v_u64m2_mu(vuint64m2_t 
*v0, vuint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei64_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei64_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei8.c index b29dfc60d148..ab44396a46f4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg2ei8.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_tu( @@ -30,7 +30,7 @@ void test_vloxseg2ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_tu( @@ -43,7 +43,7 @@ void test_vloxseg2ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // 
void test_vloxseg2ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_tu( @@ -56,7 +56,7 @@ void test_vloxseg2ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_tu( @@ -69,7 +69,7 @@ void test_vloxseg2ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_tu( @@ -82,7 +82,7 @@ void test_vloxseg2ei8_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_tu( 
@@ -95,7 +95,7 @@ void test_vloxseg2ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_tu( @@ -108,7 +108,7 @@ void test_vloxseg2ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_tu( @@ -121,7 +121,7 @@ void test_vloxseg2ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_tu( @@ -134,7 +134,7 @@ void test_vloxseg2ei8_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei8_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_tu( @@ -147,7 +147,7 @@ void test_vloxseg2ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_tu( @@ -160,7 +160,7 @@ void test_vloxseg2ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_tu( @@ -173,7 +173,7 @@ void test_vloxseg2ei8_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_tu( @@ -186,7 +186,7 @@ void test_vloxseg2ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t 
bindex, size_t vl) { - return vloxseg2ei8_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_tu( @@ -199,7 +199,7 @@ void test_vloxseg2ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_tu( @@ -212,7 +212,7 @@ void test_vloxseg2ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_tu( @@ -225,7 +225,7 @@ void test_vloxseg2ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedof // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_tu( @@ -238,7 +238,7 @@ void test_vloxseg2ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedof // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, 
vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_tu( @@ -251,7 +251,7 @@ void test_vloxseg2ei8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedof // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_tu( @@ -264,7 +264,7 @@ void test_vloxseg2ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_tu( @@ -277,7 +277,7 @@ void test_vloxseg2ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_tu( @@ -290,7 +290,7 @@ void test_vloxseg2ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_tu( @@ -303,7 +303,7 @@ void test_vloxseg2ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_tu( @@ -316,7 +316,7 @@ void test_vloxseg2ei8_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_tu( @@ -329,7 +329,7 @@ void test_vloxseg2ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_i32m2_tu( @@ -342,7 +342,7 @@ void test_vloxseg2ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_tu( @@ -355,7 +355,7 @@ void test_vloxseg2ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_tu( @@ -368,7 +368,7 @@ void test_vloxseg2ei8_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_tu( @@ -381,7 +381,7 @@ void test_vloxseg2ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei8_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_tu( @@ -394,7 +394,7 @@ void test_vloxseg2ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_tu( @@ -407,7 +407,7 @@ void test_vloxseg2ei8_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_tu( @@ -420,7 +420,7 @@ void test_vloxseg2ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_tu( @@ -433,7 +433,7 @@ void test_vloxseg2ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t 
bindex, size_t vl) { - return vloxseg2ei8_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_tu( @@ -446,7 +446,7 @@ void test_vloxseg2ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_tu( @@ -459,7 +459,7 @@ void test_vloxseg2ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maske // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_tu( @@ -472,7 +472,7 @@ void test_vloxseg2ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maske // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_tu( @@ -485,7 +485,7 @@ void test_vloxseg2ei8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maske // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16mf4_tu(vuint16mf4_t *v0, 
vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_tu( @@ -498,7 +498,7 @@ void test_vloxseg2ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_tu( @@ -511,7 +511,7 @@ void test_vloxseg2ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_tu( @@ -524,7 +524,7 @@ void test_vloxseg2ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_tu( @@ -537,7 +537,7 @@ void 
test_vloxseg2ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_tu( @@ -550,7 +550,7 @@ void test_vloxseg2ei8_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_tu( @@ -563,7 +563,7 @@ void test_vloxseg2ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_tu( @@ -576,7 +576,7 @@ void test_vloxseg2ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei8_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_tu( @@ -589,7 +589,7 @@ void test_vloxseg2ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_tu( @@ -602,7 +602,7 @@ void test_vloxseg2ei8_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_tu( @@ -615,7 +615,7 @@ void test_vloxseg2ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_tu( @@ -628,7 +628,7 @@ void test_vloxseg2ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, 
vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_tum( @@ -641,7 +641,7 @@ void test_vloxseg2ei8_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_tum( @@ -654,7 +654,7 @@ void test_vloxseg2ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_tum( @@ -667,7 +667,7 @@ void test_vloxseg2ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_tum( @@ -680,7 +680,7 @@ void 
test_vloxseg2ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_tum( @@ -693,7 +693,7 @@ void test_vloxseg2ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_tum( @@ -706,7 +706,7 @@ void test_vloxseg2ei8_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_tum( @@ -719,7 +719,7 @@ void test_vloxseg2ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - 
return vloxseg2ei8_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_tum( @@ -732,7 +732,7 @@ void test_vloxseg2ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_tum( @@ -745,7 +745,7 @@ void test_vloxseg2ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_tum( @@ -758,7 +758,7 @@ void test_vloxseg2ei8_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_tum( @@ -771,7 +771,7 @@ void test_vloxseg2ei8_v_f64m1_tum(vfloat64m1_t *v0, 
vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_tum( @@ -784,7 +784,7 @@ void test_vloxseg2ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_tum( @@ -797,7 +797,7 @@ void test_vloxseg2ei8_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_tum( @@ -810,7 +810,7 @@ void test_vloxseg2ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_tum( @@ -823,7 +823,7 @@ void test_vloxseg2ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_tum( @@ -836,7 +836,7 @@ void test_vloxseg2ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_tum( @@ -849,7 +849,7 @@ void test_vloxseg2ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_tum( @@ -862,7 +862,7 @@ void test_vloxseg2ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m4_tum(vint8m4_t *v0, 
vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_tum( @@ -875,7 +875,7 @@ void test_vloxseg2ei8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_tum( @@ -888,7 +888,7 @@ void test_vloxseg2ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_tum( @@ -901,7 +901,7 @@ void test_vloxseg2ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_tum( @@ -914,7 +914,7 @@ void test_vloxseg2ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_tum( @@ -927,7 +927,7 @@ void test_vloxseg2ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_tum( @@ -940,7 +940,7 @@ void test_vloxseg2ei8_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_tum( @@ -953,7 +953,7 @@ void test_vloxseg2ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const 
int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_tum( @@ -966,7 +966,7 @@ void test_vloxseg2ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_tum( @@ -979,7 +979,7 @@ void test_vloxseg2ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_tum( @@ -992,7 +992,7 @@ void test_vloxseg2ei8_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_tum( @@ -1005,7 +1005,7 @@ void 
test_vloxseg2ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_tum( @@ -1018,7 +1018,7 @@ void test_vloxseg2ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_tum( @@ -1031,7 +1031,7 @@ void test_vloxseg2ei8_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_tum( @@ -1044,7 +1044,7 @@ void test_vloxseg2ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return 
vloxseg2ei8_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_tum( @@ -1057,7 +1057,7 @@ void test_vloxseg2ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_tum( @@ -1070,7 +1070,7 @@ void test_vloxseg2ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_tum( @@ -1083,7 +1083,7 @@ void test_vloxseg2ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_tum( @@ -1096,7 +1096,7 @@ void test_vloxseg2ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t 
mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_tum( @@ -1109,7 +1109,7 @@ void test_vloxseg2ei8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_tum( @@ -1122,7 +1122,7 @@ void test_vloxseg2ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_tum( @@ -1135,7 +1135,7 @@ void test_vloxseg2ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); + return __riscv_vloxseg2ei8_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_tum( @@ -1148,7 +1148,7 @@ void test_vloxseg2ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_tum( @@ -1161,7 +1161,7 @@ void test_vloxseg2ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_tum( @@ -1174,7 +1174,7 @@ void test_vloxseg2ei8_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_tum( @@ -1187,7 +1187,7 @@ void test_vloxseg2ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // 
void test_vloxseg2ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_tum( @@ -1200,7 +1200,7 @@ void test_vloxseg2ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_tum( @@ -1213,7 +1213,7 @@ void test_vloxseg2ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_tum( @@ -1226,7 +1226,7 @@ void test_vloxseg2ei8_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei8_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_tum( @@ -1239,7 +1239,7 @@ void test_vloxseg2ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_tum( @@ -1252,7 +1252,7 @@ void test_vloxseg2ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_tumu( @@ -1265,7 +1265,7 @@ void test_vloxseg2ei8_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_tumu( @@ -1278,7 +1278,7 @@ void test_vloxseg2ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_tumu( @@ -1291,7 +1291,7 @@ void test_vloxseg2ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_tumu( @@ -1304,7 +1304,7 @@ void test_vloxseg2ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_tumu( @@ -1317,7 +1317,7 @@ void test_vloxseg2ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + 
return __riscv_vloxseg2ei8_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_tumu( @@ -1330,7 +1330,7 @@ void test_vloxseg2ei8_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_tumu( @@ -1343,7 +1343,7 @@ void test_vloxseg2ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_tumu( @@ -1356,7 +1356,7 @@ void test_vloxseg2ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_tumu( @@ -1369,7 +1369,7 @@ void test_vloxseg2ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void 
// void test_vloxseg2ei8_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_tumu( @@ -1382,7 +1382,7 @@ void test_vloxseg2ei8_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_tumu( @@ -1395,7 +1395,7 @@ void test_vloxseg2ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_tumu( @@ -1408,7 +1408,7 @@ void test_vloxseg2ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei8_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_tumu( @@ -1421,7 +1421,7 @@ void test_vloxseg2ei8_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_tumu( @@ -1434,7 +1434,7 @@ void test_vloxseg2ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_tumu( @@ -1447,7 +1447,7 @@ void test_vloxseg2ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_tumu( @@ -1460,7 +1460,7 @@ void test_vloxseg2ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void 
test_vloxseg2ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_tumu( @@ -1473,7 +1473,7 @@ void test_vloxseg2ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_tumu( @@ -1486,7 +1486,7 @@ void test_vloxseg2ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_tumu( @@ -1499,7 +1499,7 @@ void test_vloxseg2ei8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16mf4_tumu(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_tumu( @@ -1512,7 +1512,7 @@ void test_vloxseg2ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_tumu( @@ -1525,7 +1525,7 @@ void test_vloxseg2ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_tumu( @@ -1538,7 +1538,7 @@ void test_vloxseg2ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_tumu( @@ -1551,7 +1551,7 @@ void test_vloxseg2ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, 
vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_tumu( @@ -1564,7 +1564,7 @@ void test_vloxseg2ei8_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_tumu( @@ -1577,7 +1577,7 @@ void test_vloxseg2ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_tumu( @@ -1590,7 +1590,7 @@ void test_vloxseg2ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_tumu( @@ -1603,7 +1603,7 @@ void test_vloxseg2ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_tumu( @@ -1616,7 +1616,7 @@ void test_vloxseg2ei8_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_tumu( @@ -1629,7 +1629,7 @@ void test_vloxseg2ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_tumu( @@ -1642,7 +1642,7 @@ void test_vloxseg2ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t 
maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_tumu( @@ -1655,7 +1655,7 @@ void test_vloxseg2ei8_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_tumu( @@ -1668,7 +1668,7 @@ void test_vloxseg2ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_tumu( @@ -1681,7 +1681,7 @@ void test_vloxseg2ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_u8m1_tumu( @@ -1694,7 +1694,7 @@ void test_vloxseg2ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_tumu( @@ -1707,7 +1707,7 @@ void test_vloxseg2ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_tumu( @@ -1720,7 +1720,7 @@ void test_vloxseg2ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_tumu( @@ -1733,7 +1733,7 @@ void test_vloxseg2ei8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, 
vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_tumu( @@ -1746,7 +1746,7 @@ void test_vloxseg2ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_tumu( @@ -1759,7 +1759,7 @@ void test_vloxseg2ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_tumu( @@ -1772,7 +1772,7 @@ void test_vloxseg2ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_tumu( @@ 
-1785,7 +1785,7 @@ void test_vloxseg2ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_tumu( @@ -1798,7 +1798,7 @@ void test_vloxseg2ei8_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_tumu( @@ -1811,7 +1811,7 @@ void test_vloxseg2ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_tumu( @@ -1824,7 +1824,7 @@ void test_vloxseg2ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, 
vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_tumu( @@ -1837,7 +1837,7 @@ void test_vloxseg2ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_tumu( @@ -1850,7 +1850,7 @@ void test_vloxseg2ei8_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_tumu( @@ -1863,7 +1863,7 @@ void test_vloxseg2ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_tumu( @@ -1876,7 +1876,7 
@@ void test_vloxseg2ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_mu( @@ -1889,7 +1889,7 @@ void test_vloxseg2ei8_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_mu( @@ -1902,7 +1902,7 @@ void test_vloxseg2ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_mu( @@ -1915,7 +1915,7 @@ void test_vloxseg2ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t 
bindex, size_t vl) { - return vloxseg2ei8_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_mu( @@ -1928,7 +1928,7 @@ void test_vloxseg2ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_mu( @@ -1941,7 +1941,7 @@ void test_vloxseg2ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_mu( @@ -1954,7 +1954,7 @@ void test_vloxseg2ei8_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_mu( @@ -1967,7 +1967,7 @@ void 
test_vloxseg2ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_mu( @@ -1980,7 +1980,7 @@ void test_vloxseg2ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_mu( @@ -1993,7 +1993,7 @@ void test_vloxseg2ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_mu( @@ -2006,7 +2006,7 @@ void test_vloxseg2ei8_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return 
vloxseg2ei8_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_mu( @@ -2019,7 +2019,7 @@ void test_vloxseg2ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_mu( @@ -2032,7 +2032,7 @@ void test_vloxseg2ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_mu( @@ -2045,7 +2045,7 @@ void test_vloxseg2ei8_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_mu( @@ -2058,7 +2058,7 @@ void test_vloxseg2ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, 
vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_mu( @@ -2071,7 +2071,7 @@ void test_vloxseg2ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_mu( @@ -2084,7 +2084,7 @@ void test_vloxseg2ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_mu( @@ -2097,7 +2097,7 @@ void test_vloxseg2ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei8_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_mu( @@ -2110,7 +2110,7 @@ void test_vloxseg2ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_mu( @@ -2123,7 +2123,7 @@ void test_vloxseg2ei8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_mu( @@ -2136,7 +2136,7 @@ void test_vloxseg2ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_mu( @@ -2149,7 +2149,7 @@ void test_vloxseg2ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m1_mu(vint16m1_t *v0, 
vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_mu( @@ -2162,7 +2162,7 @@ void test_vloxseg2ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_mu( @@ -2175,7 +2175,7 @@ void test_vloxseg2ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_mu( @@ -2188,7 +2188,7 @@ void test_vloxseg2ei8_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_mu( @@ -2201,7 +2201,7 @@ void test_vloxseg2ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_mu( @@ -2214,7 +2214,7 @@ void test_vloxseg2ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_mu( @@ -2227,7 +2227,7 @@ void test_vloxseg2ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_mu( @@ -2240,7 +2240,7 @@ void test_vloxseg2ei8_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, 
vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_mu( @@ -2253,7 +2253,7 @@ void test_vloxseg2ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_mu( @@ -2266,7 +2266,7 @@ void test_vloxseg2ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_mu( @@ -2279,7 +2279,7 @@ void test_vloxseg2ei8_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_mu( @@ -2292,7 +2292,7 @@ void 
test_vloxseg2ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_mu( @@ -2305,7 +2305,7 @@ void test_vloxseg2ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_mu( @@ -2318,7 +2318,7 @@ void test_vloxseg2ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_mu( @@ -2331,7 +2331,7 @@ void test_vloxseg2ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m2_mu(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_mu( @@ -2344,7 +2344,7 @@ void test_vloxseg2ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_mu( @@ -2357,7 +2357,7 @@ void test_vloxseg2ei8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, v // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_mu( @@ -2370,7 +2370,7 @@ void test_vloxseg2ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_mu( @@ -2383,7 +2383,7 @@ void test_vloxseg2ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: 
ret void // void test_vloxseg2ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_mu( @@ -2396,7 +2396,7 @@ void test_vloxseg2ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_mu( @@ -2409,7 +2409,7 @@ void test_vloxseg2ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_mu( @@ -2422,7 +2422,7 @@ void test_vloxseg2ei8_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vloxseg2ei8_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_mu( @@ -2435,7 +2435,7 @@ void test_vloxseg2ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_mu( @@ -2448,7 +2448,7 @@ void test_vloxseg2ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_mu( @@ -2461,7 +2461,7 @@ void test_vloxseg2ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_mu( @@ -2474,7 +2474,7 @@ void test_vloxseg2ei8_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m1_mu(vuint64m1_t 
*v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_mu( @@ -2487,7 +2487,7 @@ void test_vloxseg2ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_mu( @@ -2500,6 +2500,6 @@ void test_vloxseg2ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vloxseg2ei8_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vloxseg2ei8_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei16.c index 699bf4c1ca4b..4c1bf1610faa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei16.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t 
*v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_tu( @@ -34,7 +34,7 @@ void test_vloxseg3ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_tu( @@ -49,7 +49,7 @@ void test_vloxseg3ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_tu( @@ -64,7 +64,7 @@ void test_vloxseg3ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t 
vl) { - return vloxseg3ei16_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_tu( @@ -79,7 +79,7 @@ void test_vloxseg3ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_tu( @@ -94,7 +94,7 @@ void test_vloxseg3ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_tu( @@ -109,7 +109,7 @@ void test_vloxseg3ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32m2_tu(v0, v1, v2, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_tu( @@ -124,7 +124,7 @@ void test_vloxseg3ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_tu( @@ -139,7 +139,7 @@ void test_vloxseg3ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_tu( @@ -154,7 +154,7 @@ void test_vloxseg3ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_tu( @@ -169,7 +169,7 @@ void test_vloxseg3ei16_v_i8mf8_tu(vint8mf8_t 
*v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_tu( @@ -184,7 +184,7 @@ void test_vloxseg3ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_tu( @@ -199,7 +199,7 @@ void test_vloxseg3ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_tu( @@ -214,7 +214,7 @@ void test_vloxseg3ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, 
const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_tu( @@ -229,7 +229,7 @@ void test_vloxseg3ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_tu( @@ -244,7 +244,7 @@ void test_vloxseg3ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_tu( @@ -259,7 +259,7 @@ void test_vloxseg3ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vloxseg3ei16_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_tu( @@ -274,7 +274,7 @@ void test_vloxseg3ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_tu( @@ -289,7 +289,7 @@ void test_vloxseg3ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_tu( @@ -304,7 +304,7 @@ void test_vloxseg3ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_tu( @@ -319,7 +319,7 @@ void 
test_vloxseg3ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_tu( @@ -334,7 +334,7 @@ void test_vloxseg3ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_tu( @@ -349,7 +349,7 @@ void test_vloxseg3ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_tu( @@ -364,7 +364,7 @@ void test_vloxseg3ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, 
vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_tu( @@ -379,7 +379,7 @@ void test_vloxseg3ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_tu( @@ -394,7 +394,7 @@ void test_vloxseg3ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_tu( @@ -409,7 +409,7 @@ void test_vloxseg3ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m1_tu(v0, v1, v2, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_tu( @@ -424,7 +424,7 @@ void test_vloxseg3ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_tu( @@ -439,7 +439,7 @@ void test_vloxseg3ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_tu( @@ -454,7 +454,7 @@ void test_vloxseg3ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_tu( @@ -469,7 +469,7 @@ void test_vloxseg3ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_tu( @@ -484,7 +484,7 @@ void test_vloxseg3ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_tu( @@ -499,7 +499,7 @@ void test_vloxseg3ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_tu( @@ -514,7 +514,7 @@ void test_vloxseg3ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: 
ret void // void test_vloxseg3ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_tu( @@ -529,7 +529,7 @@ void test_vloxseg3ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_tu( @@ -544,7 +544,7 @@ void test_vloxseg3ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_tu( @@ -559,7 +559,7 @@ void test_vloxseg3ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const 
uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_tum( @@ -574,7 +574,7 @@ void test_vloxseg3ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_tum( @@ -589,7 +589,7 @@ void test_vloxseg3ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_tum( @@ -604,7 +604,7 @@ void test_vloxseg3ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - 
return vloxseg3ei16_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_tum( @@ -619,7 +619,7 @@ void test_vloxseg3ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vloxseg3ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_tum( @@ -649,7 +649,7 @@ void test_vloxseg3ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m1_tum(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_tum( @@ -664,7 +664,7 @@ void test_vloxseg3ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_tum( @@ -679,7 +679,7 @@ void test_vloxseg3ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_tum( @@ -694,7 +694,7 @@ void test_vloxseg3ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vloxseg3ei16_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_tum( @@ -709,7 +709,7 @@ void test_vloxseg3ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_tum( @@ -724,7 +724,7 @@ void test_vloxseg3ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_tum( @@ -739,7 +739,7 @@ void test_vloxseg3ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_tum( @@ -754,7 +754,7 @@ void test_vloxseg3ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_tum( @@ -769,7 +769,7 @@ void test_vloxseg3ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_tum( @@ -784,7 +784,7 @@ void test_vloxseg3ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_tum( @@ -799,7 +799,7 @@ void 
test_vloxseg3ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_tum( @@ -814,7 +814,7 @@ void test_vloxseg3ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_tum( @@ -829,7 +829,7 @@ void test_vloxseg3ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vloxseg3ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // 
CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_tum( @@ -859,7 +859,7 @@ void test_vloxseg3ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_tum( @@ -874,7 +874,7 @@ void test_vloxseg3ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_tum( @@ -889,7 +889,7 @@ void test_vloxseg3ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t 
*v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_tum( @@ -904,7 +904,7 @@ void test_vloxseg3ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_tum( @@ -919,7 +919,7 @@ void test_vloxseg3ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_tum( @@ -934,7 +934,7 @@ void test_vloxseg3ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, 
vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vloxseg3ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_tum( @@ -964,7 +964,7 @@ void test_vloxseg3ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_tum( @@ -979,7 +979,7 @@ void test_vloxseg3ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return 
vloxseg3ei16_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_tum( @@ -994,7 +994,7 @@ void test_vloxseg3ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_tum( @@ -1009,7 +1009,7 @@ void test_vloxseg3ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_tum( @@ -1024,7 +1024,7 @@ void test_vloxseg3ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_tum( @@ -1039,7 +1039,7 @@ void test_vloxseg3ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vloxseg3ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_tum( @@ -1069,7 +1069,7 @@ void test_vloxseg3ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vloxseg3ei16_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_tum( @@ -1084,7 +1084,7 @@ void test_vloxseg3ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_tum( @@ -1099,7 +1099,7 @@ void test_vloxseg3ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_tum( @@ -1114,7 +1114,7 @@ void test_vloxseg3ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_tumu( @@ -1129,7 +1129,7 @@ void test_vloxseg3ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_tumu( @@ -1144,7 +1144,7 @@ void test_vloxseg3ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void test_vloxseg3ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_tumu( @@ -1174,7 +1174,7 @@ void test_vloxseg3ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_tumu( @@ -1189,7 +1189,7 @@ void test_vloxseg3ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_tumu( @@ -1204,7 +1204,7 @@ void test_vloxseg3ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei16_v_f32m2_tumu( @@ -1219,7 +1219,7 @@ void test_vloxseg3ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_tumu( @@ -1234,7 +1234,7 @@ void test_vloxseg3ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_tumu( @@ -1249,7 +1249,7 @@ void test_vloxseg3ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_tumu( @@ -1264,7 +1264,7 
@@ void test_vloxseg3ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_tumu( @@ -1279,7 +1279,7 @@ void test_vloxseg3ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_tumu( @@ -1294,7 +1294,7 @@ void test_vloxseg3ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_tumu( @@ -1309,7 +1309,7 @@ void test_vloxseg3ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // 
CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_tumu( @@ -1324,7 +1324,7 @@ void test_vloxseg3ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_tumu( @@ -1339,7 +1339,7 @@ void test_vloxseg3ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vloxseg3ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t 
*v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vloxseg3ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_tumu( @@ -1384,7 +1384,7 @@ void test_vloxseg3ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_tumu( @@ -1399,7 +1399,7 @@ void test_vloxseg3ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t 
maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_tumu( @@ -1414,7 +1414,7 @@ void test_vloxseg3ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_tumu( @@ -1429,7 +1429,7 @@ void test_vloxseg3ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_tumu( @@ -1444,7 +1444,7 @@ void test_vloxseg3ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, 
size_t vl) { - return vloxseg3ei16_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_tumu( @@ -1459,7 +1459,7 @@ void test_vloxseg3ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_tumu( @@ -1474,7 +1474,7 @@ void test_vloxseg3ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_tumu( @@ -1489,7 +1489,7 @@ void test_vloxseg3ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf4_tumu(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_tumu( @@ -1504,7 +1504,7 @@ void test_vloxseg3ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_tumu( @@ -1519,7 +1519,7 @@ void test_vloxseg3ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_tumu( @@ -1534,7 +1534,7 @@ void test_vloxseg3ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vloxseg3ei16_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_tumu( @@ -1549,7 +1549,7 @@ void test_vloxseg3ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_tumu( @@ -1564,7 +1564,7 @@ void test_vloxseg3ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vloxseg3ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16m1_tumu(v0, v1, 
v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_tumu( @@ -1594,7 +1594,7 @@ void test_vloxseg3ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_tumu( @@ -1609,7 +1609,7 @@ void test_vloxseg3ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_tumu( @@ -1624,7 +1624,7 @@ void test_vloxseg3ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_tumu( @@ -1639,7 +1639,7 @@ void test_vloxseg3ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_tumu( @@ -1654,7 +1654,7 @@ void test_vloxseg3ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_tumu( @@ -1669,7 +1669,7 @@ void test_vloxseg3ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei16_v_f16mf4_mu( @@ -1684,7 +1684,7 @@ void test_vloxseg3ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_mu( @@ -1699,7 +1699,7 @@ void test_vloxseg3ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_mu( @@ -1714,7 +1714,7 @@ void test_vloxseg3ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_mu( @@ -1729,7 
+1729,7 @@ void test_vloxseg3ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_mu( @@ -1744,7 +1744,7 @@ void test_vloxseg3ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_mu( @@ -1759,7 +1759,7 @@ void test_vloxseg3ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_mu( @@ -1774,7 +1774,7 @@ void test_vloxseg3ei16_v_f32m1_mu(vfloat32m1_t *v0, 
vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_mu( @@ -1789,7 +1789,7 @@ void test_vloxseg3ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_mu( @@ -1804,7 +1804,7 @@ void test_vloxseg3ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_mu( @@ -1819,7 +1819,7 @@ void test_vloxseg3ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_mu( @@ -1834,7 +1834,7 @@ void test_vloxseg3ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_mu( @@ -1849,7 +1849,7 @@ void test_vloxseg3ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_mu( @@ -1864,7 +1864,7 @@ void test_vloxseg3ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, 
vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_mu( @@ -1879,7 +1879,7 @@ void test_vloxseg3ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_mu( @@ -1894,7 +1894,7 @@ void test_vloxseg3ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_mu( @@ -1909,7 +1909,7 @@ void test_vloxseg3ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - 
return vloxseg3ei16_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_mu( @@ -1924,7 +1924,7 @@ void test_vloxseg3ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_mu( @@ -1939,7 +1939,7 @@ void test_vloxseg3ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_mu( @@ -1954,7 +1954,7 @@ void test_vloxseg3ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, 
vl); + return __riscv_vloxseg3ei16_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_mu( @@ -1969,7 +1969,7 @@ void test_vloxseg3ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_mu( @@ -1984,7 +1984,7 @@ void test_vloxseg3ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_mu( @@ -1999,7 +1999,7 @@ void test_vloxseg3ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_mu( @@ -2014,7 +2014,7 @@ void test_vloxseg3ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_mu( @@ -2029,7 +2029,7 @@ void test_vloxseg3ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_mu( @@ -2044,7 +2044,7 @@ void test_vloxseg3ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_mu( @@ -2059,7 +2059,7 @@ void 
test_vloxseg3ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_mu( @@ -2074,7 +2074,7 @@ void test_vloxseg3ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_mu( @@ -2089,7 +2089,7 @@ void test_vloxseg3ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_mu( @@ -2104,7 +2104,7 @@ void test_vloxseg3ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_mu( @@ -2119,7 +2119,7 @@ void test_vloxseg3ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_mu( @@ -2134,7 +2134,7 @@ void test_vloxseg3ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_mu( @@ -2149,7 +2149,7 @@ void test_vloxseg3ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, 
vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_mu( @@ -2164,7 +2164,7 @@ void test_vloxseg3ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_mu( @@ -2179,7 +2179,7 @@ void test_vloxseg3ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_mu( @@ -2194,7 +2194,7 @@ void test_vloxseg3ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t 
maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_mu( @@ -2209,7 +2209,7 @@ void test_vloxseg3ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_mu( @@ -2224,6 +2224,6 @@ void test_vloxseg3ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei16_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei32.c index 56b53ad0269d..caa9ac746ea5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei32.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei32.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_tu( @@ -34,7 +34,7 @@ void test_vloxseg3ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_tu( @@ -49,7 +49,7 @@ void test_vloxseg3ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_tu( @@ -64,7 +64,7 @@ void test_vloxseg3ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // 
void test_vloxseg3ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_tu( @@ -79,7 +79,7 @@ void test_vloxseg3ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_tu( @@ -94,7 +94,7 @@ void test_vloxseg3ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_tu( @@ -109,7 +109,7 @@ void test_vloxseg3ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t 
maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_tu( @@ -124,7 +124,7 @@ void test_vloxseg3ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_tu( @@ -139,7 +139,7 @@ void test_vloxseg3ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_tu( @@ -154,7 +154,7 @@ void test_vloxseg3ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vloxseg3ei32_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_tu( @@ -169,7 +169,7 @@ void test_vloxseg3ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_tu( @@ -184,7 +184,7 @@ void test_vloxseg3ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_tu( @@ -199,7 +199,7 @@ void test_vloxseg3ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_tu( @@ -214,7 +214,7 @@ void 
test_vloxseg3ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_tu( @@ -229,7 +229,7 @@ void test_vloxseg3ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_tu( @@ -244,7 +244,7 @@ void test_vloxseg3ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_tu( @@ -259,7 +259,7 @@ void test_vloxseg3ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t 
*v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_tu( @@ -274,7 +274,7 @@ void test_vloxseg3ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_tu( @@ -289,7 +289,7 @@ void test_vloxseg3ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_tu( @@ -304,7 +304,7 @@ void test_vloxseg3ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m1_tu(v0, v1, v2, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_tu( @@ -319,7 +319,7 @@ void test_vloxseg3ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_tu( @@ -334,7 +334,7 @@ void test_vloxseg3ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_tu( @@ -349,7 +349,7 @@ void test_vloxseg3ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei32_v_u8mf8_tu( @@ -364,7 +364,7 @@ void test_vloxseg3ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_tu( @@ -379,7 +379,7 @@ void test_vloxseg3ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_tu( @@ -394,7 +394,7 @@ void test_vloxseg3ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_tu( @@ -409,7 +409,7 @@ void test_vloxseg3ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_tu( @@ -424,7 +424,7 @@ void test_vloxseg3ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_tu( @@ -439,7 +439,7 @@ void test_vloxseg3ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_tu( @@ -454,7 +454,7 @@ void test_vloxseg3ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, 
vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_tu( @@ -469,7 +469,7 @@ void test_vloxseg3ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_tu( @@ -484,7 +484,7 @@ void test_vloxseg3ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_tu( @@ -499,7 +499,7 @@ void test_vloxseg3ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vloxseg3ei32_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_tu( @@ -514,7 +514,7 @@ void test_vloxseg3ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_tu( @@ -529,7 +529,7 @@ void test_vloxseg3ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_tu( @@ -544,7 +544,7 @@ void test_vloxseg3ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_tu( @@ -559,7 +559,7 @@ void 
test_vloxseg3ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_tum( @@ -574,7 +574,7 @@ void test_vloxseg3ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_tum( @@ -589,7 +589,7 @@ void test_vloxseg3ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_tum( @@ -604,7 +604,7 @@ void test_vloxseg3ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // 
CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_tum( @@ -619,7 +619,7 @@ void test_vloxseg3ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vloxseg3ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_tum( @@ -649,7 +649,7 @@ void test_vloxseg3ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_tum( @@ -664,7 +664,7 @@ void test_vloxseg3ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_tum( @@ -679,7 +679,7 @@ void test_vloxseg3ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_tum( @@ -694,7 +694,7 @@ void test_vloxseg3ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, 
vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_tum( @@ -709,7 +709,7 @@ void test_vloxseg3ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_tum( @@ -724,7 +724,7 @@ void test_vloxseg3ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_tum( @@ -739,7 +739,7 @@ void test_vloxseg3ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t 
maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_tum( @@ -754,7 +754,7 @@ void test_vloxseg3ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_tum( @@ -769,7 +769,7 @@ void test_vloxseg3ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_tum( @@ -784,7 +784,7 @@ void test_vloxseg3ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf4_tum(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_tum( @@ -799,7 +799,7 @@ void test_vloxseg3ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_tum( @@ -814,7 +814,7 @@ void test_vloxseg3ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_tum( @@ -829,7 +829,7 @@ void test_vloxseg3ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vloxseg3ei32_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vloxseg3ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_tum( @@ -859,7 +859,7 @@ void test_vloxseg3ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_tum( @@ -874,7 +874,7 @@ void test_vloxseg3ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_tum( @@ -889,7 +889,7 @@ void test_vloxseg3ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_tum( @@ -904,7 +904,7 @@ void test_vloxseg3ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_tum( @@ -919,7 +919,7 @@ void test_vloxseg3ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_tum( @@ -934,7 +934,7 @@ void 
test_vloxseg3ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vloxseg3ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_tum( @@ -964,7 +964,7 @@ void test_vloxseg3ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_tum( @@ -979,7 +979,7 @@ void test_vloxseg3ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: 
ret void // void test_vloxseg3ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_tum( @@ -994,7 +994,7 @@ void test_vloxseg3ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_tum( @@ -1009,7 +1009,7 @@ void test_vloxseg3ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_tum( @@ -1024,7 +1024,7 @@ void test_vloxseg3ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16m1_tum(vuint16m1_t *v0, 
vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_tum( @@ -1039,7 +1039,7 @@ void test_vloxseg3ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vloxseg3ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_tum( @@ -1069,7 +1069,7 @@ void test_vloxseg3ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t 
maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_tum( @@ -1084,7 +1084,7 @@ void test_vloxseg3ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_tum( @@ -1099,7 +1099,7 @@ void test_vloxseg3ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_tum( @@ -1114,7 +1114,7 @@ void test_vloxseg3ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t 
*base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_tumu( @@ -1129,7 +1129,7 @@ void test_vloxseg3ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_tumu( @@ -1144,7 +1144,7 @@ void test_vloxseg3ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void test_vloxseg3ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, 
size_t vl) { - return vloxseg3ei32_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_tumu( @@ -1174,7 +1174,7 @@ void test_vloxseg3ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_tumu( @@ -1189,7 +1189,7 @@ void test_vloxseg3ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_tumu( @@ -1204,7 +1204,7 @@ void test_vloxseg3ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return 
vloxseg3ei32_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_tumu( @@ -1219,7 +1219,7 @@ void test_vloxseg3ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_tumu( @@ -1234,7 +1234,7 @@ void test_vloxseg3ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_tumu( @@ -1249,7 +1249,7 @@ void test_vloxseg3ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m2_tumu(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_tumu( @@ -1264,7 +1264,7 @@ void test_vloxseg3ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_tumu( @@ -1279,7 +1279,7 @@ void test_vloxseg3ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_tumu( @@ -1294,7 +1294,7 @@ void test_vloxseg3ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vloxseg3ei32_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_tumu( @@ -1309,7 +1309,7 @@ void test_vloxseg3ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_tumu( @@ -1324,7 +1324,7 @@ void test_vloxseg3ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_tumu( @@ -1339,7 +1339,7 @@ void test_vloxseg3ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vloxseg3ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vloxseg3ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_tumu( @@ -1384,7 +1384,7 @@ void test_vloxseg3ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_tumu( @@ 
-1399,7 +1399,7 @@ void test_vloxseg3ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_tumu( @@ -1414,7 +1414,7 @@ void test_vloxseg3ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_tumu( @@ -1429,7 +1429,7 @@ void test_vloxseg3ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_tumu( @@ -1444,7 +1444,7 @@ void test_vloxseg3ei32_v_i32m2_tumu(vint32m2_t *v0, 
vint32m2_t *v1, vint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_tumu( @@ -1459,7 +1459,7 @@ void test_vloxseg3ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_tumu( @@ -1474,7 +1474,7 @@ void test_vloxseg3ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_tumu( @@ -1489,7 +1489,7 @@ void test_vloxseg3ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_tumu( @@ -1504,7 +1504,7 @@ void test_vloxseg3ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_tumu( @@ -1519,7 +1519,7 @@ void test_vloxseg3ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_tumu( @@ -1534,7 +1534,7 @@ void test_vloxseg3ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, 
vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_tumu( @@ -1549,7 +1549,7 @@ void test_vloxseg3ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_tumu( @@ -1564,7 +1564,7 @@ void test_vloxseg3ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vloxseg3ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, 
vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_tumu( @@ -1594,7 +1594,7 @@ void test_vloxseg3ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_tumu( @@ -1609,7 +1609,7 @@ void test_vloxseg3ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_tumu( @@ -1624,7 +1624,7 @@ void test_vloxseg3ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const 
uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_tumu( @@ -1639,7 +1639,7 @@ void test_vloxseg3ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_tumu( @@ -1654,7 +1654,7 @@ void test_vloxseg3ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_tumu( @@ -1669,7 +1669,7 @@ void test_vloxseg3ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return 
vloxseg3ei32_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_mu( @@ -1684,7 +1684,7 @@ void test_vloxseg3ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_mu( @@ -1699,7 +1699,7 @@ void test_vloxseg3ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_mu( @@ -1714,7 +1714,7 @@ void test_vloxseg3ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m1_mu(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_mu( @@ -1729,7 +1729,7 @@ void test_vloxseg3ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_mu( @@ -1744,7 +1744,7 @@ void test_vloxseg3ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_mu( @@ -1759,7 +1759,7 @@ void test_vloxseg3ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vloxseg3ei32_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_mu( @@ -1774,7 +1774,7 @@ void test_vloxseg3ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_mu( @@ -1789,7 +1789,7 @@ void test_vloxseg3ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_mu( @@ -1804,7 +1804,7 @@ void test_vloxseg3ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_mu( @@ -1819,7 +1819,7 @@ void test_vloxseg3ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_mu( @@ -1834,7 +1834,7 @@ void test_vloxseg3ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_mu( @@ -1849,7 +1849,7 @@ void test_vloxseg3ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_mu( @@ -1864,7 +1864,7 @@ void 
test_vloxseg3ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_mu( @@ -1879,7 +1879,7 @@ void test_vloxseg3ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_mu( @@ -1894,7 +1894,7 @@ void test_vloxseg3ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_mu( @@ -1909,7 +1909,7 @@ void test_vloxseg3ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_mu( @@ -1924,7 +1924,7 @@ void test_vloxseg3ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_mu( @@ -1939,7 +1939,7 @@ void test_vloxseg3ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_mu( @@ -1954,7 +1954,7 @@ void test_vloxseg3ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, 
vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_mu( @@ -1969,7 +1969,7 @@ void test_vloxseg3ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_mu( @@ -1984,7 +1984,7 @@ void test_vloxseg3ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_mu( @@ -1999,7 +1999,7 @@ void test_vloxseg3ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t 
bindex, size_t vl) { - return vloxseg3ei32_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_mu( @@ -2014,7 +2014,7 @@ void test_vloxseg3ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_mu( @@ -2029,7 +2029,7 @@ void test_vloxseg3ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_mu( @@ -2044,7 +2044,7 @@ void test_vloxseg3ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_mu( @@ -2059,7 +2059,7 @@ void test_vloxseg3ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_mu( @@ -2074,7 +2074,7 @@ void test_vloxseg3ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_mu( @@ -2089,7 +2089,7 @@ void test_vloxseg3ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_mu( @@ -2104,7 +2104,7 @@ void test_vloxseg3ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_mu( @@ -2119,7 +2119,7 @@ void test_vloxseg3ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_mu( @@ -2134,7 +2134,7 @@ void test_vloxseg3ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei32_v_u16m2_mu( @@ -2149,7 +2149,7 @@ void test_vloxseg3ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_mu( @@ -2164,7 +2164,7 @@ void test_vloxseg3ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_mu( @@ -2179,7 +2179,7 @@ void test_vloxseg3ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_mu( @@ -2194,7 +2194,7 @@ void 
test_vloxseg3ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_mu( @@ -2209,7 +2209,7 @@ void test_vloxseg3ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_mu( @@ -2224,6 +2224,6 @@ void test_vloxseg3ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei32_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei64.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei64.c index 1fde250eb294..96747594a4c5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei64.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_tu( @@ -34,7 +34,7 @@ void test_vloxseg3ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_tu( @@ -49,7 +49,7 @@ void test_vloxseg3ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16m1_tu(v0, v1, v2, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_tu( @@ -64,7 +64,7 @@ void test_vloxseg3ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_tu( @@ -79,7 +79,7 @@ void test_vloxseg3ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_tu( @@ -94,7 +94,7 @@ void test_vloxseg3ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_tu( @@ -109,7 +109,7 @@ void 
test_vloxseg3ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_tu( @@ -124,7 +124,7 @@ void test_vloxseg3ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_tu( @@ -139,7 +139,7 @@ void test_vloxseg3ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_tu( @@ -154,7 +154,7 @@ void test_vloxseg3ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf8_tu(vint8mf8_t *v0, 
vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_tu( @@ -169,7 +169,7 @@ void test_vloxseg3ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_tu( @@ -184,7 +184,7 @@ void test_vloxseg3ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_tu( @@ -199,7 +199,7 @@ void test_vloxseg3ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i8m1_tu(v0, v1, v2, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_tu( @@ -214,7 +214,7 @@ void test_vloxseg3ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_tu( @@ -229,7 +229,7 @@ void test_vloxseg3ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_tu( @@ -244,7 +244,7 @@ void test_vloxseg3ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei64_v_i16m2_tu( @@ -259,7 +259,7 @@ void test_vloxseg3ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_tu( @@ -274,7 +274,7 @@ void test_vloxseg3ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_tu( @@ -289,7 +289,7 @@ void test_vloxseg3ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_tu( @@ -304,7 +304,7 @@ void test_vloxseg3ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_tu( @@ -319,7 +319,7 @@ void test_vloxseg3ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_tu( @@ -334,7 +334,7 @@ void test_vloxseg3ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_tu( @@ -349,7 +349,7 @@ void test_vloxseg3ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t 
vl) { - return vloxseg3ei64_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_tu( @@ -364,7 +364,7 @@ void test_vloxseg3ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_tu( @@ -379,7 +379,7 @@ void test_vloxseg3ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_tu( @@ -394,7 +394,7 @@ void test_vloxseg3ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_tu( @@ -409,7 +409,7 @@ void test_vloxseg3ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_tu( @@ -424,7 +424,7 @@ void test_vloxseg3ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_tu( @@ -439,7 +439,7 @@ void test_vloxseg3ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_tu( @@ -454,7 +454,7 @@ void test_vloxseg3ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t 
*v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_tu( @@ -469,7 +469,7 @@ void test_vloxseg3ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_tu( @@ -484,7 +484,7 @@ void test_vloxseg3ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_tu( @@ -499,7 +499,7 @@ void test_vloxseg3ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t 
maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_tu( @@ -514,7 +514,7 @@ void test_vloxseg3ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_tu( @@ -529,7 +529,7 @@ void test_vloxseg3ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_tum( @@ -544,7 +544,7 @@ void test_vloxseg3ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf4_tum(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_tum( @@ -559,7 +559,7 @@ void test_vloxseg3ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_tum( @@ -574,7 +574,7 @@ void test_vloxseg3ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_tum( @@ -589,7 +589,7 @@ void test_vloxseg3ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); 
+ return __riscv_vloxseg3ei64_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_tum( @@ -604,7 +604,7 @@ void test_vloxseg3ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_tum( @@ -619,7 +619,7 @@ void test_vloxseg3ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_tum( @@ -634,7 +634,7 @@ void test_vloxseg3ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32m2_tum(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_tum( @@ -649,7 +649,7 @@ void test_vloxseg3ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_tum( @@ -664,7 +664,7 @@ void test_vloxseg3ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_tum( @@ -679,7 +679,7 @@ void test_vloxseg3ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei64_v_i8mf4_tum( @@ -694,7 +694,7 @@ void test_vloxseg3ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_tum( @@ -709,7 +709,7 @@ void test_vloxseg3ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_tum( @@ -724,7 +724,7 @@ void test_vloxseg3ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_tum( @@ -739,7 +739,7 @@ void test_vloxseg3ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, 
vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_tum( @@ -754,7 +754,7 @@ void test_vloxseg3ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_tum( @@ -769,7 +769,7 @@ void test_vloxseg3ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_tum( @@ -784,7 +784,7 @@ void test_vloxseg3ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_tum( @@ -799,7 +799,7 @@ void test_vloxseg3ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_tum( @@ -814,7 +814,7 @@ void test_vloxseg3ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_tum( @@ -829,7 +829,7 @@ void test_vloxseg3ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, 
vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_tum( @@ -844,7 +844,7 @@ void test_vloxseg3ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_tum( @@ -859,7 +859,7 @@ void test_vloxseg3ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_tum( @@ -874,7 +874,7 @@ void test_vloxseg3ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, 
vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_tum( @@ -889,7 +889,7 @@ void test_vloxseg3ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_tum( @@ -904,7 +904,7 @@ void test_vloxseg3ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_tum( @@ -919,7 +919,7 @@ void test_vloxseg3ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u8m1_tum(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_tum( @@ -934,7 +934,7 @@ void test_vloxseg3ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_tum( @@ -949,7 +949,7 @@ void test_vloxseg3ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_tum( @@ -964,7 +964,7 @@ void test_vloxseg3ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vloxseg3ei64_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_tum( @@ -979,7 +979,7 @@ void test_vloxseg3ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_tum( @@ -994,7 +994,7 @@ void test_vloxseg3ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_tum( @@ -1009,7 +1009,7 @@ void test_vloxseg3ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_tum( @@ -1024,7 +1024,7 @@ void test_vloxseg3ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_tum( @@ -1039,7 +1039,7 @@ void test_vloxseg3ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_tum( @@ -1054,7 +1054,7 @@ void test_vloxseg3ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei64_v_f16mf4_tumu( @@ -1069,7 +1069,7 @@ void test_vloxseg3ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_tumu( @@ -1084,7 +1084,7 @@ void test_vloxseg3ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_tumu( @@ -1099,7 +1099,7 @@ void test_vloxseg3ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei64_v_f16m2_tumu( @@ -1114,7 +1114,7 @@ void test_vloxseg3ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_tumu( @@ -1129,7 +1129,7 @@ void test_vloxseg3ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_tumu( @@ -1144,7 +1144,7 @@ void test_vloxseg3ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_tumu( @@ -1159,7 
+1159,7 @@ void test_vloxseg3ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_tumu( @@ -1174,7 +1174,7 @@ void test_vloxseg3ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_tumu( @@ -1189,7 +1189,7 @@ void test_vloxseg3ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_tumu( @@ -1204,7 +1204,7 @@ void 
test_vloxseg3ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_tumu( @@ -1219,7 +1219,7 @@ void test_vloxseg3ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_tumu( @@ -1234,7 +1234,7 @@ void test_vloxseg3ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_tumu( @@ -1249,7 +1249,7 @@ void test_vloxseg3ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // 
CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_tumu( @@ -1264,7 +1264,7 @@ void test_vloxseg3ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_tumu( @@ -1279,7 +1279,7 @@ void test_vloxseg3ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_tumu( @@ -1294,7 +1294,7 @@ void test_vloxseg3ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16m1_tumu(vint16m1_t 
*v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_tumu( @@ -1309,7 +1309,7 @@ void test_vloxseg3ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_tumu( @@ -1324,7 +1324,7 @@ void test_vloxseg3ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_tumu( @@ -1339,7 +1339,7 @@ void test_vloxseg3ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, 
vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_tumu( @@ -1354,7 +1354,7 @@ void test_vloxseg3ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_tumu( @@ -1369,7 +1369,7 @@ void test_vloxseg3ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_tumu( @@ -1384,7 +1384,7 @@ void test_vloxseg3ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t 
bindex, size_t vl) { - return vloxseg3ei64_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_tumu( @@ -1399,7 +1399,7 @@ void test_vloxseg3ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_tumu( @@ -1414,7 +1414,7 @@ void test_vloxseg3ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_tumu( @@ -1429,7 +1429,7 @@ void test_vloxseg3ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf2_tumu(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_tumu( @@ -1444,7 +1444,7 @@ void test_vloxseg3ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_tumu( @@ -1459,7 +1459,7 @@ void test_vloxseg3ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_tumu( @@ -1474,7 +1474,7 @@ void test_vloxseg3ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, 
vl); + return __riscv_vloxseg3ei64_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_tumu( @@ -1489,7 +1489,7 @@ void test_vloxseg3ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_tumu( @@ -1504,7 +1504,7 @@ void test_vloxseg3ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_tumu( @@ -1519,7 +1519,7 @@ void test_vloxseg3ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32mf2_tumu(v0, 
v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_tumu( @@ -1534,7 +1534,7 @@ void test_vloxseg3ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_tumu( @@ -1549,7 +1549,7 @@ void test_vloxseg3ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_tumu( @@ -1564,7 +1564,7 @@ void test_vloxseg3ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_tumu( @@ -1579,7 +1579,7 @@ void test_vloxseg3ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_mu( @@ -1594,7 +1594,7 @@ void test_vloxseg3ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_mu( @@ -1609,7 +1609,7 @@ void test_vloxseg3ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei64_v_f16m1_mu( @@ -1624,7 +1624,7 @@ void test_vloxseg3ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_mu( @@ -1639,7 +1639,7 @@ void test_vloxseg3ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_mu( @@ -1654,7 +1654,7 @@ void test_vloxseg3ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_mu( @@ -1669,7 +1669,7 @@ void 
test_vloxseg3ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_mu( @@ -1684,7 +1684,7 @@ void test_vloxseg3ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_mu( @@ -1699,7 +1699,7 @@ void test_vloxseg3ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_mu( @@ -1714,7 +1714,7 @@ void test_vloxseg3ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_mu( @@ -1729,7 +1729,7 @@ void test_vloxseg3ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_mu( @@ -1744,7 +1744,7 @@ void test_vloxseg3ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_mu( @@ -1759,7 +1759,7 @@ void test_vloxseg3ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_mu( @@ -1774,7 +1774,7 @@ void test_vloxseg3ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_mu( @@ -1789,7 +1789,7 @@ void test_vloxseg3ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_mu( @@ -1804,7 +1804,7 @@ void test_vloxseg3ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, 
const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_mu( @@ -1819,7 +1819,7 @@ void test_vloxseg3ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_mu( @@ -1834,7 +1834,7 @@ void test_vloxseg3ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_mu( @@ -1849,7 +1849,7 @@ void test_vloxseg3ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i32mf2_mu(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_mu( @@ -1864,7 +1864,7 @@ void test_vloxseg3ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_mu( @@ -1879,7 +1879,7 @@ void test_vloxseg3ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_mu( @@ -1894,7 +1894,7 @@ void test_vloxseg3ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i64m1_mu(v0, 
v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_mu( @@ -1909,7 +1909,7 @@ void test_vloxseg3ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_mu( @@ -1924,7 +1924,7 @@ void test_vloxseg3ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_mu( @@ -1939,7 +1939,7 @@ void test_vloxseg3ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei64_v_u8mf2_mu( @@ -1954,7 +1954,7 @@ void test_vloxseg3ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_mu( @@ -1969,7 +1969,7 @@ void test_vloxseg3ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_mu( @@ -1984,7 +1984,7 @@ void test_vloxseg3ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_mu( @@ -1999,7 +1999,7 @@ void 
test_vloxseg3ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_mu( @@ -2014,7 +2014,7 @@ void test_vloxseg3ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_mu( @@ -2029,7 +2029,7 @@ void test_vloxseg3ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_mu( @@ -2044,7 +2044,7 @@ void test_vloxseg3ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // 
CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_mu( @@ -2059,7 +2059,7 @@ void test_vloxseg3ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_mu( @@ -2074,7 +2074,7 @@ void test_vloxseg3ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_mu( @@ -2089,7 +2089,7 @@ void test_vloxseg3ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u64m1_mu(vuint64m1_t *v0, 
vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_mu( @@ -2104,6 +2104,6 @@ void test_vloxseg3ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei64_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei8.c index 858eee7724aa..b38fc15c7eca 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg3ei8.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_tu( 
@@ -34,7 +34,7 @@ void test_vloxseg3ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_tu( @@ -49,7 +49,7 @@ void test_vloxseg3ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_tu( @@ -64,7 +64,7 @@ void test_vloxseg3ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_tu( @@ -79,7 +79,7 @@ void test_vloxseg3ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_tu( @@ -94,7 +94,7 @@ void test_vloxseg3ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_tu( @@ -109,7 +109,7 @@ void test_vloxseg3ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_tu( @@ -124,7 +124,7 @@ void test_vloxseg3ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double 
*base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_tu( @@ -139,7 +139,7 @@ void test_vloxseg3ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_tu( @@ -154,7 +154,7 @@ void test_vloxseg3ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_tu( @@ -169,7 +169,7 @@ void test_vloxseg3ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf4_tu(v0, v1, v2, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_tu( @@ -184,7 +184,7 @@ void test_vloxseg3ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_tu( @@ -199,7 +199,7 @@ void test_vloxseg3ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_tu( @@ -214,7 +214,7 @@ void test_vloxseg3ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_tu( @@ -229,7 +229,7 @@ void test_vloxseg3ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_tu( @@ -244,7 +244,7 @@ void test_vloxseg3ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_tu( @@ -259,7 +259,7 @@ void test_vloxseg3ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_tu( @@ -274,7 +274,7 @@ void test_vloxseg3ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t 
vl) { - return vloxseg3ei8_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_tu( @@ -289,7 +289,7 @@ void test_vloxseg3ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_tu( @@ -304,7 +304,7 @@ void test_vloxseg3ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_tu( @@ -319,7 +319,7 @@ void test_vloxseg3ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_tu( @@ -334,7 +334,7 @@ void test_vloxseg3ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_tu( @@ -349,7 +349,7 @@ void test_vloxseg3ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_tu( @@ -364,7 +364,7 @@ void test_vloxseg3ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_tu( @@ -379,7 +379,7 @@ void test_vloxseg3ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_tu( @@ -394,7 +394,7 @@ void test_vloxseg3ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_tu( @@ -409,7 +409,7 @@ void test_vloxseg3ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_tu( @@ -424,7 +424,7 @@ void test_vloxseg3ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return 
vloxseg3ei8_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_tu( @@ -439,7 +439,7 @@ void test_vloxseg3ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_tu( @@ -454,7 +454,7 @@ void test_vloxseg3ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_tu( @@ -469,7 +469,7 @@ void test_vloxseg3ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_tu( @@ -484,7 +484,7 @@ void test_vloxseg3ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_tu( @@ -499,7 +499,7 @@ void test_vloxseg3ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_tu( @@ -514,7 +514,7 @@ void test_vloxseg3ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_tu( @@ -529,7 +529,7 @@ void test_vloxseg3ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_tu( @@ -544,7 +544,7 @@ void test_vloxseg3ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_tu( @@ -559,7 +559,7 @@ void test_vloxseg3ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_tum( @@ -574,7 +574,7 @@ void test_vloxseg3ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, 
vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_tum( @@ -589,7 +589,7 @@ void test_vloxseg3ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_tum( @@ -604,7 +604,7 @@ void test_vloxseg3ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_tum( @@ -619,7 +619,7 @@ void test_vloxseg3ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const 
_Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vloxseg3ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_tum( @@ -649,7 +649,7 @@ void test_vloxseg3ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_tum( @@ -664,7 +664,7 @@ void test_vloxseg3ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return 
vloxseg3ei8_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_tum( @@ -679,7 +679,7 @@ void test_vloxseg3ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_tum( @@ -694,7 +694,7 @@ void test_vloxseg3ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_tum( @@ -709,7 +709,7 @@ void test_vloxseg3ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, 
vl); + return __riscv_vloxseg3ei8_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_tum( @@ -724,7 +724,7 @@ void test_vloxseg3ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_tum( @@ -739,7 +739,7 @@ void test_vloxseg3ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_tum( @@ -754,7 +754,7 @@ void test_vloxseg3ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_tum( @@ -769,7 +769,7 @@ void test_vloxseg3ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_tum( @@ -784,7 +784,7 @@ void test_vloxseg3ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_tum( @@ -799,7 +799,7 @@ void test_vloxseg3ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_tum( @@ -814,7 +814,7 @@ void 
test_vloxseg3ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_tum( @@ -829,7 +829,7 @@ void test_vloxseg3ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vloxseg3ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_tum( @@ -859,7 +859,7 @@ void test_vloxseg3ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // 
void test_vloxseg3ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_tum( @@ -874,7 +874,7 @@ void test_vloxseg3ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_tum( @@ -889,7 +889,7 @@ void test_vloxseg3ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_tum( @@ -904,7 +904,7 @@ void test_vloxseg3ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t 
maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_tum( @@ -919,7 +919,7 @@ void test_vloxseg3ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_tum( @@ -934,7 +934,7 @@ void test_vloxseg3ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vloxseg3ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, 
size_t vl) { - return vloxseg3ei8_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_tum( @@ -964,7 +964,7 @@ void test_vloxseg3ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_tum( @@ -979,7 +979,7 @@ void test_vloxseg3ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_tum( @@ -994,7 +994,7 @@ void test_vloxseg3ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); + return __riscv_vloxseg3ei8_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_tum( @@ -1009,7 +1009,7 @@ void test_vloxseg3ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_tum( @@ -1024,7 +1024,7 @@ void test_vloxseg3ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_tum( @@ -1039,7 +1039,7 @@ void test_vloxseg3ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16m2_tum(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vloxseg3ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_tum( @@ -1069,7 +1069,7 @@ void test_vloxseg3ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_tum( @@ -1084,7 +1084,7 @@ void test_vloxseg3ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg3ei8_v_u64m1_tum( @@ -1099,7 +1099,7 @@ void test_vloxseg3ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_tum( @@ -1114,7 +1114,7 @@ void test_vloxseg3ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_tumu( @@ -1129,7 +1129,7 @@ void test_vloxseg3ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_tumu( @@ -1144,7 +1144,7 @@ void 
test_vloxseg3ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void test_vloxseg3ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_tumu( @@ -1174,7 +1174,7 @@ void test_vloxseg3ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_tumu( @@ -1189,7 +1189,7 @@ void test_vloxseg3ei8_v_f16m2_tumu(vfloat16m2_t *v0, 
vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_tumu( @@ -1204,7 +1204,7 @@ void test_vloxseg3ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_tumu( @@ -1219,7 +1219,7 @@ void test_vloxseg3ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_tumu( @@ -1234,7 +1234,7 @@ void test_vloxseg3ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void 
// void test_vloxseg3ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_tumu( @@ -1249,7 +1249,7 @@ void test_vloxseg3ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_tumu( @@ -1264,7 +1264,7 @@ void test_vloxseg3ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_tumu( @@ -1279,7 +1279,7 @@ void test_vloxseg3ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, 
vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_tumu( @@ -1294,7 +1294,7 @@ void test_vloxseg3ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_tumu( @@ -1309,7 +1309,7 @@ void test_vloxseg3ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_tumu( @@ -1324,7 +1324,7 @@ void test_vloxseg3ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, 
vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_tumu( @@ -1339,7 +1339,7 @@ void test_vloxseg3ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vloxseg3ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vloxseg3ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m1_tumu(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_tumu( @@ -1384,7 +1384,7 @@ void test_vloxseg3ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_tumu( @@ -1399,7 +1399,7 @@ void test_vloxseg3ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_tumu( @@ -1414,7 +1414,7 @@ void test_vloxseg3ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vloxseg3ei8_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_tumu( @@ -1429,7 +1429,7 @@ void test_vloxseg3ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_tumu( @@ -1444,7 +1444,7 @@ void test_vloxseg3ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_tumu( @@ -1459,7 +1459,7 @@ void test_vloxseg3ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_tumu( @@ -1474,7 +1474,7 @@ void test_vloxseg3ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_tumu( @@ -1489,7 +1489,7 @@ void test_vloxseg3ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_tumu( @@ -1504,7 +1504,7 @@ void test_vloxseg3ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_tumu( @@ -1519,7 
+1519,7 @@ void test_vloxseg3ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_tumu( @@ -1534,7 +1534,7 @@ void test_vloxseg3ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_tumu( @@ -1549,7 +1549,7 @@ void test_vloxseg3ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_tumu( @@ -1564,7 +1564,7 @@ void test_vloxseg3ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vloxseg3ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_tumu( @@ -1594,7 +1594,7 @@ void test_vloxseg3ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_tumu( @@ -1609,7 +1609,7 @@ void test_vloxseg3ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg3ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_tumu( @@ -1624,7 +1624,7 @@ void test_vloxseg3ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_tumu( @@ -1639,7 +1639,7 @@ void test_vloxseg3ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_tumu( @@ -1654,7 +1654,7 @@ void test_vloxseg3ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, 
vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_tumu( @@ -1669,7 +1669,7 @@ void test_vloxseg3ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_mu( @@ -1684,7 +1684,7 @@ void test_vloxseg3ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_mu( @@ -1699,7 +1699,7 @@ void test_vloxseg3ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, 
vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_mu( @@ -1714,7 +1714,7 @@ void test_vloxseg3ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_mu( @@ -1729,7 +1729,7 @@ void test_vloxseg3ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_mu( @@ -1744,7 +1744,7 @@ void test_vloxseg3ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, 
vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_mu( @@ -1759,7 +1759,7 @@ void test_vloxseg3ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_mu( @@ -1774,7 +1774,7 @@ void test_vloxseg3ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_mu( @@ -1789,7 +1789,7 @@ void test_vloxseg3ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m1_mu(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_mu( @@ -1804,7 +1804,7 @@ void test_vloxseg3ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_mu( @@ -1819,7 +1819,7 @@ void test_vloxseg3ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_mu( @@ -1834,7 +1834,7 @@ void test_vloxseg3ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf4_mu(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_mu( @@ -1849,7 +1849,7 @@ void test_vloxseg3ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_mu( @@ -1864,7 +1864,7 @@ void test_vloxseg3ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_mu( @@ -1879,7 +1879,7 @@ void test_vloxseg3ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_mu( @@ -1894,7 +1894,7 @@ void 
test_vloxseg3ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_mu( @@ -1909,7 +1909,7 @@ void test_vloxseg3ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_mu( @@ -1924,7 +1924,7 @@ void test_vloxseg3ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_mu( @@ -1939,7 +1939,7 @@ void test_vloxseg3ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void 
// void test_vloxseg3ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_mu( @@ -1954,7 +1954,7 @@ void test_vloxseg3ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_mu( @@ -1969,7 +1969,7 @@ void test_vloxseg3ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_mu( @@ -1984,7 +1984,7 @@ void test_vloxseg3ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t 
maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_mu( @@ -1999,7 +1999,7 @@ void test_vloxseg3ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_mu( @@ -2014,7 +2014,7 @@ void test_vloxseg3ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_mu( @@ -2029,7 +2029,7 @@ void test_vloxseg3ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - 
return vloxseg3ei8_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_mu( @@ -2044,7 +2044,7 @@ void test_vloxseg3ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_mu( @@ -2059,7 +2059,7 @@ void test_vloxseg3ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_mu( @@ -2074,7 +2074,7 @@ void test_vloxseg3ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vloxseg3ei8_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_mu( @@ -2089,7 +2089,7 @@ void test_vloxseg3ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_mu( @@ -2104,7 +2104,7 @@ void test_vloxseg3ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_mu( @@ -2119,7 +2119,7 @@ void test_vloxseg3ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_mu( @@ -2134,7 +2134,7 @@ void test_vloxseg3ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_mu( @@ -2149,7 +2149,7 @@ void test_vloxseg3ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_mu( @@ -2164,7 +2164,7 @@ void test_vloxseg3ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_mu( @@ -2179,7 +2179,7 @@ void 
test_vloxseg3ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_mu( @@ -2194,7 +2194,7 @@ void test_vloxseg3ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_mu( @@ -2209,7 +2209,7 @@ void test_vloxseg3ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg3ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_mu( @@ -2224,6 +2224,6 @@ void test_vloxseg3ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret 
void // void test_vloxseg3ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vloxseg3ei8_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei16.c index 9266a675e6ea..4d004b8eb594 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei16.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_tu( @@ -38,7 +38,7 @@ void test_vloxseg4ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_tu( @@ -55,7 +55,7 @@ void test_vloxseg4ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_tu( @@ -72,7 +72,7 @@ void test_vloxseg4ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_tu( @@ -89,7 +89,7 @@ void test_vloxseg4ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg4ei16_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_tu( @@ -106,7 +106,7 @@ void test_vloxseg4ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_tu( @@ -123,7 +123,7 @@ void test_vloxseg4ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_tu( @@ -140,7 +140,7 @@ void test_vloxseg4ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, 
const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_tu( @@ -157,7 +157,7 @@ void test_vloxseg4ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_tu( @@ -174,7 +174,7 @@ void test_vloxseg4ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_tu( @@ -191,7 +191,7 @@ void test_vloxseg4ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t 
maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_tu( @@ -208,7 +208,7 @@ void test_vloxseg4ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_tu( @@ -225,7 +225,7 @@ void test_vloxseg4ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_tu( @@ -242,7 +242,7 @@ void test_vloxseg4ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t 
maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_tu( @@ -259,7 +259,7 @@ void test_vloxseg4ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_tu( @@ -276,7 +276,7 @@ void test_vloxseg4ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_tu( @@ -293,7 +293,7 @@ void test_vloxseg4ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t 
maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_tu( @@ -310,7 +310,7 @@ void test_vloxseg4ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_tu( @@ -327,7 +327,7 @@ void test_vloxseg4ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_tu( @@ -344,7 +344,7 @@ void test_vloxseg4ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, 
vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_tu( @@ -361,7 +361,7 @@ void test_vloxseg4ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_tu( @@ -378,7 +378,7 @@ void test_vloxseg4ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_tu( @@ -395,7 +395,7 @@ void test_vloxseg4ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, 
vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_tu( @@ -412,7 +412,7 @@ void test_vloxseg4ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_tu( @@ -429,7 +429,7 @@ void test_vloxseg4ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_tu( @@ -446,7 +446,7 @@ void test_vloxseg4ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_tu( @@ -463,7 +463,7 @@ void test_vloxseg4ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_tu( @@ -480,7 +480,7 @@ void test_vloxseg4ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_tu( @@ -497,7 +497,7 @@ void test_vloxseg4ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret 
void // void test_vloxseg4ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_tu( @@ -514,7 +514,7 @@ void test_vloxseg4ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_tu( @@ -531,7 +531,7 @@ void test_vloxseg4ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_tu( @@ -548,7 +548,7 @@ void 
test_vloxseg4ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_tu( @@ -565,7 +565,7 @@ void test_vloxseg4ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_tu( @@ -582,7 +582,7 @@ void test_vloxseg4ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei16_v_u32m2_tu( @@ -599,7 +599,7 @@ void test_vloxseg4ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_tu( @@ -616,7 +616,7 @@ void test_vloxseg4ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_tu( @@ -633,7 +633,7 @@ void test_vloxseg4ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_tum( @@ -650,7 +650,7 @@ void test_vloxseg4ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_tum( @@ -667,7 +667,7 @@ void test_vloxseg4ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_tum( @@ -684,7 +684,7 @@ void test_vloxseg4ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m1_tum(v0, v1, v2, v3, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_tum( @@ -701,7 +701,7 @@ void test_vloxseg4ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_tum( @@ -718,7 +718,7 @@ void test_vloxseg4ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_tum( @@ -735,7 +735,7 @@ void test_vloxseg4ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, 
vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_tum( @@ -752,7 +752,7 @@ void test_vloxseg4ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_tum( @@ -769,7 +769,7 @@ void test_vloxseg4ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_tum( @@ -786,7 +786,7 @@ void test_vloxseg4ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret 
void // void test_vloxseg4ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_tum( @@ -803,7 +803,7 @@ void test_vloxseg4ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_tum( @@ -820,7 +820,7 @@ void test_vloxseg4ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_tum( 
@@ -837,7 +837,7 @@ void test_vloxseg4ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_tum( @@ -854,7 +854,7 @@ void test_vloxseg4ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_tum( @@ -871,7 +871,7 @@ void test_vloxseg4ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_tum( @@ -888,7 +888,7 @@ void test_vloxseg4ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_tum( @@ -905,7 +905,7 @@ void test_vloxseg4ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_tum( @@ -922,7 +922,7 @@ void test_vloxseg4ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_tum( @@ -939,7 +939,7 @@ void test_vloxseg4ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_tum( @@ -956,7 +956,7 @@ void test_vloxseg4ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_tum( @@ -973,7 +973,7 @@ void test_vloxseg4ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const 
int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_tum( @@ -990,7 +990,7 @@ void test_vloxseg4ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_tum( @@ -1007,7 +1007,7 @@ void test_vloxseg4ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_tum( @@ -1024,7 +1024,7 @@ void test_vloxseg4ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t 
*v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_tum( @@ -1041,7 +1041,7 @@ void test_vloxseg4ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_tum( @@ -1058,7 +1058,7 @@ void test_vloxseg4ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_tum( @@ -1075,7 +1075,7 @@ void test_vloxseg4ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, 
vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_tum( @@ -1092,7 +1092,7 @@ void test_vloxseg4ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_tum( @@ -1109,7 +1109,7 @@ void test_vloxseg4ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei16_v_u16mf4_tum( @@ -1126,7 +1126,7 @@ void test_vloxseg4ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_tum( @@ -1143,7 +1143,7 @@ void test_vloxseg4ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_tum( @@ -1160,7 +1160,7 @@ void test_vloxseg4ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_tum( @@ -1177,7 +1177,7 @@ void test_vloxseg4ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_tum( @@ -1194,7 +1194,7 @@ void test_vloxseg4ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_tum( @@ -1211,7 +1211,7 @@ void test_vloxseg4ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, 
const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_tum( @@ -1228,7 +1228,7 @@ void test_vloxseg4ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_tum( @@ -1245,7 +1245,7 @@ void test_vloxseg4ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_tum( @@ -1262,7 +1262,7 @@ void test_vloxseg4ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, 
vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_tumu( @@ -1279,7 +1279,7 @@ void test_vloxseg4ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_tumu( @@ -1296,7 +1296,7 @@ void test_vloxseg4ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_tumu( @@ 
-1313,7 +1313,7 @@ void test_vloxseg4ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_tumu( @@ -1330,7 +1330,7 @@ void test_vloxseg4ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_tumu( @@ -1347,7 +1347,7 @@ void test_vloxseg4ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + 
return __riscv_vloxseg4ei16_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_tumu( @@ -1364,7 +1364,7 @@ void test_vloxseg4ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_tumu( @@ -1381,7 +1381,7 @@ void test_vloxseg4ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_tumu( @@ -1398,7 +1398,7 @@ void test_vloxseg4ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const 
double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_tumu( @@ -1415,7 +1415,7 @@ void test_vloxseg4ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_tumu( @@ -1432,7 +1432,7 @@ void test_vloxseg4ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_tumu( @@ -1449,7 +1449,7 @@ void test_vloxseg4ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, 
vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_tumu( @@ -1466,7 +1466,7 @@ void test_vloxseg4ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_tumu( @@ -1483,7 +1483,7 @@ void test_vloxseg4ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_tumu( @@ -1500,7 +1500,7 @@ void test_vloxseg4ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, 
vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_tumu( @@ -1517,7 +1517,7 @@ void test_vloxseg4ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_tumu( @@ -1534,7 +1534,7 @@ void test_vloxseg4ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_tumu( @@ -1551,7 +1551,7 @@ void test_vloxseg4ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_tumu( @@ -1568,7 +1568,7 @@ void test_vloxseg4ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_tumu( @@ -1585,7 +1585,7 @@ void test_vloxseg4ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, 
vl); + return __riscv_vloxseg4ei16_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_tumu( @@ -1602,7 +1602,7 @@ void test_vloxseg4ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_tumu( @@ -1619,7 +1619,7 @@ void test_vloxseg4ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_tumu( @@ -1636,7 +1636,7 @@ void test_vloxseg4ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, 
size_t vl) { - return vloxseg4ei16_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_tumu( @@ -1653,7 +1653,7 @@ void test_vloxseg4ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_tumu( @@ -1670,7 +1670,7 @@ void test_vloxseg4ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_tumu( @@ -1687,7 +1687,7 @@ void test_vloxseg4ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, 
vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_tumu( @@ -1704,7 +1704,7 @@ void test_vloxseg4ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_tumu( @@ -1721,7 +1721,7 @@ void test_vloxseg4ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_tumu( @@ -1738,7 +1738,7 @@ void test_vloxseg4ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t 
*v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_tumu( @@ -1755,7 +1755,7 @@ void test_vloxseg4ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_tumu( @@ -1772,7 +1772,7 @@ void test_vloxseg4ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_tumu( @@ -1789,7 +1789,7 @@ void test_vloxseg4ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_tumu( @@ -1806,7 +1806,7 @@ void test_vloxseg4ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_tumu( @@ -1823,7 +1823,7 @@ void test_vloxseg4ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_tumu( @@ -1840,7 +1840,7 @@ void test_vloxseg4ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_tumu( @@ -1857,7 +1857,7 @@ void test_vloxseg4ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_tumu( @@ -1874,7 +1874,7 @@ void test_vloxseg4ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t 
maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_tumu( @@ -1891,7 +1891,7 @@ void test_vloxseg4ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_mu( @@ -1908,7 +1908,7 @@ void test_vloxseg4ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_mu( @@ -1925,7 +1925,7 @@ void test_vloxseg4ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_mu( @@ -1942,7 +1942,7 @@ void test_vloxseg4ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_mu( @@ -1959,7 +1959,7 @@ void test_vloxseg4ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei16_v_f32mf2_mu( @@ -1976,7 +1976,7 @@ void test_vloxseg4ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_mu( @@ -1993,7 +1993,7 @@ void test_vloxseg4ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_mu( @@ -2010,7 +2010,7 @@ void test_vloxseg4ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); + return __riscv_vloxseg4ei16_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_mu( @@ -2027,7 +2027,7 @@ void test_vloxseg4ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_mu( @@ -2044,7 +2044,7 @@ void test_vloxseg4ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_mu( @@ -2061,7 +2061,7 @@ void test_vloxseg4ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, 
vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_mu( @@ -2078,7 +2078,7 @@ void test_vloxseg4ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_mu( @@ -2095,7 +2095,7 @@ void test_vloxseg4ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_mu( @@ -2112,7 +2112,7 @@ void test_vloxseg4ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t 
maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_mu( @@ -2129,7 +2129,7 @@ void test_vloxseg4ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_mu( @@ -2146,7 +2146,7 @@ void test_vloxseg4ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_mu( @@ -2163,7 +2163,7 @@ void test_vloxseg4ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_mu( @@ -2180,7 +2180,7 @@ void test_vloxseg4ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_mu( @@ -2197,7 +2197,7 @@ void test_vloxseg4ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_mu( @@ -2214,7 +2214,7 @@ 
void test_vloxseg4ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_mu( @@ -2231,7 +2231,7 @@ void test_vloxseg4ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_mu( @@ -2248,7 +2248,7 @@ void test_vloxseg4ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_mu( @@ -2265,7 +2265,7 @@ void test_vloxseg4ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_mu( @@ -2282,7 +2282,7 @@ void test_vloxseg4ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_mu( @@ -2299,7 +2299,7 @@ void test_vloxseg4ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_mu( @@ -2316,7 +2316,7 @@ void test_vloxseg4ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_mu( @@ -2333,7 +2333,7 @@ void test_vloxseg4ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_mu( @@ -2350,7 +2350,7 @@ void test_vloxseg4ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, 
vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_mu( @@ -2367,7 +2367,7 @@ void test_vloxseg4ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_mu( @@ -2384,7 +2384,7 @@ void test_vloxseg4ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_mu( @@ -2401,7 +2401,7 @@ void test_vloxseg4ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, 
vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_mu( @@ -2418,7 +2418,7 @@ void test_vloxseg4ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_mu( @@ -2435,7 +2435,7 @@ void test_vloxseg4ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_mu( @@ -2452,7 +2452,7 @@ void test_vloxseg4ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, 
vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_mu( @@ -2469,7 +2469,7 @@ void test_vloxseg4ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_mu( @@ -2486,7 +2486,7 @@ void test_vloxseg4ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_mu( @@ -2503,7 +2503,7 @@ void test_vloxseg4ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_mu( @@ -2520,6 +2520,6 @@ void test_vloxseg4ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei16_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei32.c index 60254f337bd2..a12fa8d2f4f2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei32.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, 
vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_tu( @@ -38,7 +38,7 @@ void test_vloxseg4ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_tu( @@ -55,7 +55,7 @@ void test_vloxseg4ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_tu( @@ -72,7 +72,7 @@ void test_vloxseg4ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_tu( @@ -89,7 +89,7 @@ void test_vloxseg4ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_tu( @@ -106,7 +106,7 @@ void test_vloxseg4ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_tu( @@ -123,7 +123,7 @@ void test_vloxseg4ei32_v_f32m1_tu(vfloat32m1_t 
*v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_tu( @@ -140,7 +140,7 @@ void test_vloxseg4ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_tu( @@ -157,7 +157,7 @@ void test_vloxseg4ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_tu( @@ 
-174,7 +174,7 @@ void test_vloxseg4ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_tu( @@ -191,7 +191,7 @@ void test_vloxseg4ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_tu( @@ -208,7 +208,7 @@ void test_vloxseg4ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_i8m1_tu( @@ -225,7 +225,7 @@ void test_vloxseg4ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_tu( @@ -242,7 +242,7 @@ void test_vloxseg4ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_tu( @@ -259,7 +259,7 @@ void test_vloxseg4ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_i16mf2_tu( @@ -276,7 +276,7 @@ void test_vloxseg4ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_tu( @@ -293,7 +293,7 @@ void test_vloxseg4ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_tu( @@ -310,7 +310,7 @@ void test_vloxseg4ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_tu( @@ -327,7 +327,7 @@ void test_vloxseg4ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_tu( @@ -344,7 +344,7 @@ void test_vloxseg4ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_tu( @@ -361,7 +361,7 @@ void test_vloxseg4ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_tu( @@ -378,7 +378,7 @@ void test_vloxseg4ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_tu( @@ -395,7 +395,7 @@ void test_vloxseg4ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_tu( @@ -412,7 +412,7 @@ void test_vloxseg4ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_tu( @@ -429,7 +429,7 @@ void test_vloxseg4ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_tu( @@ -446,7 +446,7 @@ void test_vloxseg4ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_tu( @@ -463,7 +463,7 @@ void test_vloxseg4ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8m1_tu(v0, 
v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_tu( @@ -480,7 +480,7 @@ void test_vloxseg4ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_tu( @@ -497,7 +497,7 @@ void test_vloxseg4ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_tu( @@ -514,7 +514,7 @@ void test_vloxseg4ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); + return __riscv_vloxseg4ei32_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_tu( @@ -531,7 +531,7 @@ void test_vloxseg4ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_tu( @@ -548,7 +548,7 @@ void test_vloxseg4ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_tu( @@ -565,7 +565,7 @@ void test_vloxseg4ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32mf2_tu(v0, v1, v2, v3, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_tu( @@ -582,7 +582,7 @@ void test_vloxseg4ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_tu( @@ -599,7 +599,7 @@ void test_vloxseg4ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_tu( @@ -616,7 +616,7 @@ void test_vloxseg4ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return 
vloxseg4ei32_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_tu( @@ -633,7 +633,7 @@ void test_vloxseg4ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_tum( @@ -650,7 +650,7 @@ void test_vloxseg4ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_tum( @@ -667,7 +667,7 @@ void test_vloxseg4ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t 
maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_tum( @@ -684,7 +684,7 @@ void test_vloxseg4ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_tum( @@ -701,7 +701,7 @@ void test_vloxseg4ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_tum( @@ -718,7 +718,7 @@ void test_vloxseg4ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void 
// void test_vloxseg4ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_tum( @@ -735,7 +735,7 @@ void test_vloxseg4ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_tum( @@ -752,7 +752,7 @@ void test_vloxseg4ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_tum( @@ -769,7 +769,7 @@ void test_vloxseg4ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_tum( @@ -786,7 +786,7 @@ void test_vloxseg4ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_tum( @@ -803,7 +803,7 @@ void test_vloxseg4ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, 
vl); + return __riscv_vloxseg4ei32_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_tum( @@ -820,7 +820,7 @@ void test_vloxseg4ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_tum( @@ -837,7 +837,7 @@ void test_vloxseg4ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_tum( @@ -854,7 +854,7 @@ void test_vloxseg4ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return 
vloxseg4ei32_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_tum( @@ -871,7 +871,7 @@ void test_vloxseg4ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_tum( @@ -888,7 +888,7 @@ void test_vloxseg4ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_tum( @@ -905,7 +905,7 @@ void test_vloxseg4ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t 
maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_tum( @@ -922,7 +922,7 @@ void test_vloxseg4ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_tum( @@ -939,7 +939,7 @@ void test_vloxseg4ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_tum( @@ -956,7 +956,7 @@ void test_vloxseg4ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_tum( @@ -973,7 +973,7 @@ void test_vloxseg4ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_tum( @@ -990,7 +990,7 @@ void test_vloxseg4ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_tum( @@ -1007,7 
+1007,7 @@ void test_vloxseg4ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_tum( @@ -1024,7 +1024,7 @@ void test_vloxseg4ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_tum( @@ -1041,7 +1041,7 @@ void test_vloxseg4ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf8_tum(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_tum( @@ -1058,7 +1058,7 @@ void test_vloxseg4ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_tum( @@ -1075,7 +1075,7 @@ void test_vloxseg4ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_tum( @@ -1092,7 +1092,7 @@ void test_vloxseg4ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m1_tum(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_tum( @@ -1109,7 +1109,7 @@ void test_vloxseg4ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_tum( @@ -1126,7 +1126,7 @@ void test_vloxseg4ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_tum( @@ -1143,7 +1143,7 @@ void test_vloxseg4ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, 
vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_tum( @@ -1160,7 +1160,7 @@ void test_vloxseg4ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_tum( @@ -1177,7 +1177,7 @@ void test_vloxseg4ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_tum( @@ -1194,7 +1194,7 @@ void test_vloxseg4ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_tum( @@ -1211,7 +1211,7 @@ void test_vloxseg4ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_tum( @@ -1228,7 +1228,7 @@ void test_vloxseg4ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_u64m1_tum( @@ -1245,7 +1245,7 @@ void test_vloxseg4ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_tum( @@ -1262,7 +1262,7 @@ void test_vloxseg4ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_tumu( @@ -1279,7 +1279,7 @@ void test_vloxseg4ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); + return __riscv_vloxseg4ei32_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_tumu( @@ -1296,7 +1296,7 @@ void test_vloxseg4ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_tumu( @@ -1313,7 +1313,7 @@ void test_vloxseg4ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_tumu( @@ -1330,7 +1330,7 @@ void test_vloxseg4ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, 
vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_tumu( @@ -1347,7 +1347,7 @@ void test_vloxseg4ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_tumu( @@ -1364,7 +1364,7 @@ void test_vloxseg4ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_tumu( @@ -1381,7 +1381,7 @@ void test_vloxseg4ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_tumu( @@ -1398,7 +1398,7 @@ void test_vloxseg4ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_tumu( @@ -1415,7 +1415,7 @@ void test_vloxseg4ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_i8mf8_tumu( @@ -1432,7 +1432,7 @@ void test_vloxseg4ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_tumu( @@ -1449,7 +1449,7 @@ void test_vloxseg4ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_tumu( @@ -1466,7 +1466,7 @@ void test_vloxseg4ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vloxseg4ei32_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_tumu( @@ -1483,7 +1483,7 @@ void test_vloxseg4ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_tumu( @@ -1500,7 +1500,7 @@ void test_vloxseg4ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_tumu( @@ -1517,7 +1517,7 @@ void test_vloxseg4ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return 
vloxseg4ei32_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_tumu( @@ -1534,7 +1534,7 @@ void test_vloxseg4ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_tumu( @@ -1551,7 +1551,7 @@ void test_vloxseg4ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_tumu( @@ -1568,7 +1568,7 @@ void test_vloxseg4ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t 
maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_tumu( @@ -1585,7 +1585,7 @@ void test_vloxseg4ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_tumu( @@ -1602,7 +1602,7 @@ void test_vloxseg4ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_tumu( @@ -1619,7 +1619,7 @@ void test_vloxseg4ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // 
CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_tumu( @@ -1636,7 +1636,7 @@ void test_vloxseg4ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_tumu( @@ -1653,7 +1653,7 @@ void test_vloxseg4ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_u8mf8_tumu( @@ -1670,7 +1670,7 @@ void test_vloxseg4ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_tumu( @@ -1687,7 +1687,7 @@ void test_vloxseg4ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_tumu( @@ -1704,7 +1704,7 @@ void test_vloxseg4ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + 
return __riscv_vloxseg4ei32_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_tumu( @@ -1721,7 +1721,7 @@ void test_vloxseg4ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_tumu( @@ -1738,7 +1738,7 @@ void test_vloxseg4ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_tumu( @@ -1755,7 +1755,7 @@ void test_vloxseg4ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, 
size_t vl) { - return vloxseg4ei32_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_tumu( @@ -1772,7 +1772,7 @@ void test_vloxseg4ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_tumu( @@ -1789,7 +1789,7 @@ void test_vloxseg4ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_tumu( @@ -1806,7 +1806,7 @@ void test_vloxseg4ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, 
vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_tumu( @@ -1823,7 +1823,7 @@ void test_vloxseg4ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_tumu( @@ -1840,7 +1840,7 @@ void test_vloxseg4ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_tumu( @@ -1857,7 +1857,7 @@ void 
test_vloxseg4ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_tumu( @@ -1874,7 +1874,7 @@ void test_vloxseg4ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_tumu( @@ -1891,7 +1891,7 @@ void test_vloxseg4ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u64m2_tumu(v0, v1, v2, v3, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_mu( @@ -1908,7 +1908,7 @@ void test_vloxseg4ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_mu( @@ -1925,7 +1925,7 @@ void test_vloxseg4ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_mu( @@ -1942,7 +1942,7 @@ void test_vloxseg4ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { 
- return vloxseg4ei32_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_mu( @@ -1959,7 +1959,7 @@ void test_vloxseg4ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_mu( @@ -1976,7 +1976,7 @@ void test_vloxseg4ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_mu( @@ -1993,7 +1993,7 @@ void test_vloxseg4ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, 
vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_mu( @@ -2010,7 +2010,7 @@ void test_vloxseg4ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_mu( @@ -2027,7 +2027,7 @@ void test_vloxseg4ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_mu( @@ -2044,7 +2044,7 @@ void test_vloxseg4ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, 
vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_mu( @@ -2061,7 +2061,7 @@ void test_vloxseg4ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_mu( @@ -2078,7 +2078,7 @@ void test_vloxseg4ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_i8mf2_mu( @@ -2095,7 +2095,7 @@ void test_vloxseg4ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_mu( @@ -2112,7 +2112,7 @@ void test_vloxseg4ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_mu( @@ -2129,7 +2129,7 @@ void test_vloxseg4ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_mu( @@ -2146,7 +2146,7 @@ void test_vloxseg4ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_mu( @@ -2163,7 +2163,7 @@ void test_vloxseg4ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_mu( @@ -2180,7 +2180,7 @@ void test_vloxseg4ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_mu( @@ -2197,7 +2197,7 @@ void test_vloxseg4ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_mu( @@ -2214,7 +2214,7 @@ void test_vloxseg4ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_mu( @@ -2231,7 +2231,7 @@ void test_vloxseg4ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const 
int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_mu( @@ -2248,7 +2248,7 @@ void test_vloxseg4ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_mu( @@ -2265,7 +2265,7 @@ void test_vloxseg4ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_mu( @@ -2282,7 +2282,7 @@ void test_vloxseg4ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t 
mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_mu( @@ -2299,7 +2299,7 @@ void test_vloxseg4ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_mu( @@ -2316,7 +2316,7 @@ void test_vloxseg4ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_mu( @@ -2333,7 +2333,7 @@ void test_vloxseg4ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: 
ret void // void test_vloxseg4ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_mu( @@ -2350,7 +2350,7 @@ void test_vloxseg4ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_mu( @@ -2367,7 +2367,7 @@ void test_vloxseg4ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_mu( @@ -2384,7 +2384,7 @@ 
void test_vloxseg4ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_mu( @@ -2401,7 +2401,7 @@ void test_vloxseg4ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_mu( @@ -2418,7 +2418,7 @@ void test_vloxseg4ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16m1_mu(v0, v1, 
v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_mu( @@ -2435,7 +2435,7 @@ void test_vloxseg4ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_mu( @@ -2452,7 +2452,7 @@ void test_vloxseg4ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_mu( @@ -2469,7 +2469,7 @@ void test_vloxseg4ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return 
vloxseg4ei32_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_mu( @@ -2486,7 +2486,7 @@ void test_vloxseg4ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_mu( @@ -2503,7 +2503,7 @@ void test_vloxseg4ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_mu( @@ -2520,6 +2520,6 @@ void test_vloxseg4ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, 
vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei32_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei64.c index e4944fd92c7e..ec0012b2643a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei64.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_tu( @@ -38,7 +38,7 @@ void test_vloxseg4ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16mf2_tu(v0, v1, 
v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_tu( @@ -55,7 +55,7 @@ void test_vloxseg4ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_tu( @@ -72,7 +72,7 @@ void test_vloxseg4ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_tu( @@ -89,7 +89,7 @@ void test_vloxseg4ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_tu( @@ -106,7 +106,7 @@ void test_vloxseg4ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_tu( @@ -123,7 +123,7 @@ void test_vloxseg4ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_tu( @@ -140,7 +140,7 @@ void test_vloxseg4ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m1_tu(v0, 
v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_tu( @@ -157,7 +157,7 @@ void test_vloxseg4ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_tu( @@ -174,7 +174,7 @@ void test_vloxseg4ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_tu( @@ -191,7 +191,7 @@ void test_vloxseg4ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return 
vloxseg4ei64_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_tu( @@ -208,7 +208,7 @@ void test_vloxseg4ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_tu( @@ -225,7 +225,7 @@ void test_vloxseg4ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_tu( @@ -242,7 +242,7 @@ void test_vloxseg4ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { 
- return vloxseg4ei64_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_tu( @@ -259,7 +259,7 @@ void test_vloxseg4ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_tu( @@ -276,7 +276,7 @@ void test_vloxseg4ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_tu( @@ -293,7 +293,7 @@ void test_vloxseg4ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, 
vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_tu( @@ -310,7 +310,7 @@ void test_vloxseg4ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_tu( @@ -327,7 +327,7 @@ void test_vloxseg4ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_tu( @@ -344,7 +344,7 @@ void test_vloxseg4ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t 
maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_tu( @@ -361,7 +361,7 @@ void test_vloxseg4ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_tu( @@ -378,7 +378,7 @@ void test_vloxseg4ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_tu( @@ -395,7 +395,7 @@ void test_vloxseg4ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t 
maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_tu( @@ -412,7 +412,7 @@ void test_vloxseg4ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_tu( @@ -429,7 +429,7 @@ void test_vloxseg4ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_tu( @@ -446,7 +446,7 @@ void test_vloxseg4ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, 
vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_tu( @@ -463,7 +463,7 @@ void test_vloxseg4ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_tu( @@ -480,7 +480,7 @@ void test_vloxseg4ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_tu( @@ -497,7 +497,7 @@ void test_vloxseg4ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, 
vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_tu( @@ -514,7 +514,7 @@ void test_vloxseg4ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_tu( @@ -531,7 +531,7 @@ void test_vloxseg4ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_tu( @@ -548,7 +548,7 @@ void test_vloxseg4ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_tu( @@ -565,7 +565,7 @@ void test_vloxseg4ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_tu( @@ -582,7 +582,7 @@ void test_vloxseg4ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_tu( @@ -599,7 +599,7 @@ void test_vloxseg4ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t 
// CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_tum( @@ -616,7 +616,7 @@ void test_vloxseg4ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_tum( @@ -633,7 +633,7 @@ void test_vloxseg4ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_tum( @@ -650,7 +650,7 @@ void test_vloxseg4ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_tum( @@ -667,7 +667,7 @@ void test_vloxseg4ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_tum( @@ -684,7 +684,7 @@ void test_vloxseg4ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_tum( @@ -701,7 +701,7 @@ void test_vloxseg4ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_tum( @@ -718,7 +718,7 @@ void test_vloxseg4ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_tum( @@ -735,7 +735,7 @@ void test_vloxseg4ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t 
maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_tum( @@ -752,7 +752,7 @@ void test_vloxseg4ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_tum( @@ -769,7 +769,7 @@ void test_vloxseg4ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_tum( @@ -786,7 +786,7 @@ void test_vloxseg4ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, 
vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_tum( @@ -803,7 +803,7 @@ void test_vloxseg4ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_tum( @@ -820,7 +820,7 @@ void test_vloxseg4ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_tum( @@ -837,7 +837,7 @@ void test_vloxseg4ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // 
CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_tum( @@ -854,7 +854,7 @@ void test_vloxseg4ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_tum( @@ -871,7 +871,7 @@ void test_vloxseg4ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei64_v_i16m2_tum( @@ -888,7 +888,7 @@ void test_vloxseg4ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_tum( @@ -905,7 +905,7 @@ void test_vloxseg4ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_tum( @@ -922,7 +922,7 @@ void test_vloxseg4ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vloxseg4ei64_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_tum( @@ -939,7 +939,7 @@ void test_vloxseg4ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_tum( @@ -956,7 +956,7 @@ void test_vloxseg4ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_tum( @@ -973,7 +973,7 @@ void test_vloxseg4ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return 
vloxseg4ei64_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_tum( @@ -990,7 +990,7 @@ void test_vloxseg4ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_tum( @@ -1007,7 +1007,7 @@ void test_vloxseg4ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_tum( @@ -1024,7 +1024,7 @@ void test_vloxseg4ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t 
maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_tum( @@ -1041,7 +1041,7 @@ void test_vloxseg4ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_tum( @@ -1058,7 +1058,7 @@ void test_vloxseg4ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_tum( @@ -1075,7 +1075,7 @@ void test_vloxseg4ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: 
ret void // void test_vloxseg4ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_tum( @@ -1092,7 +1092,7 @@ void test_vloxseg4ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_tum( @@ -1109,7 +1109,7 @@ void test_vloxseg4ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei64_v_u32mf2_tum( @@ -1126,7 +1126,7 @@ void test_vloxseg4ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_tum( @@ -1143,7 +1143,7 @@ void test_vloxseg4ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_tum( @@ -1160,7 +1160,7 @@ void test_vloxseg4ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); 
+ return __riscv_vloxseg4ei64_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_tum( @@ -1177,7 +1177,7 @@ void test_vloxseg4ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_tum( @@ -1194,7 +1194,7 @@ void test_vloxseg4ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_tumu( @@ -1211,7 +1211,7 @@ void test_vloxseg4ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 
*base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_tumu( @@ -1228,7 +1228,7 @@ void test_vloxseg4ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_tumu( @@ -1245,7 +1245,7 @@ void test_vloxseg4ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_tumu( @@ -1262,7 +1262,7 @@ void test_vloxseg4ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_tumu( @@ -1279,7 +1279,7 @@ void test_vloxseg4ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_tumu( @@ -1296,7 +1296,7 @@ void test_vloxseg4ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_tumu( @@ -1313,7 +1313,7 @@ void test_vloxseg4ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_tumu( @@ -1330,7 +1330,7 @@ void test_vloxseg4ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_tumu( @@ -1347,7 +1347,7 @@ void test_vloxseg4ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_tumu( @@ -1364,7 +1364,7 @@ void test_vloxseg4ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_tumu( @@ -1381,7 +1381,7 @@ void test_vloxseg4ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_tumu( @@ -1398,7 +1398,7 @@ void test_vloxseg4ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const 
int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_tumu( @@ -1415,7 +1415,7 @@ void test_vloxseg4ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_tumu( @@ -1432,7 +1432,7 @@ void test_vloxseg4ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_tumu( @@ -1449,7 +1449,7 @@ void test_vloxseg4ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, 
vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_tumu( @@ -1466,7 +1466,7 @@ void test_vloxseg4ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_tumu( @@ -1483,7 +1483,7 @@ void test_vloxseg4ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_tumu( @@ -1500,7 +1500,7 @@ void test_vloxseg4ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t 
*v1, vint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_tumu( @@ -1517,7 +1517,7 @@ void test_vloxseg4ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_tumu( @@ -1534,7 +1534,7 @@ void test_vloxseg4ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_tumu( @@ -1551,7 +1551,7 @@ void test_vloxseg4ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_tumu( @@ -1568,7 +1568,7 @@ void test_vloxseg4ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_tumu( @@ -1585,7 +1585,7 @@ void test_vloxseg4ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, 
vl); + return __riscv_vloxseg4ei64_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_tumu( @@ -1602,7 +1602,7 @@ void test_vloxseg4ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_tumu( @@ -1619,7 +1619,7 @@ void test_vloxseg4ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_tumu( @@ -1636,7 +1636,7 @@ void test_vloxseg4ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t 
bindex, size_t vl) { - return vloxseg4ei64_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_tumu( @@ -1653,7 +1653,7 @@ void test_vloxseg4ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_tumu( @@ -1670,7 +1670,7 @@ void test_vloxseg4ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_tumu( @@ -1687,7 +1687,7 @@ void test_vloxseg4ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, 
vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_tumu( @@ -1704,7 +1704,7 @@ void test_vloxseg4ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_tumu( @@ -1721,7 +1721,7 @@ void test_vloxseg4ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_tumu( @@ -1738,7 +1738,7 @@ void 
test_vloxseg4ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_tumu( @@ -1755,7 +1755,7 @@ void test_vloxseg4ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_tumu( @@ -1772,7 +1772,7 @@ void test_vloxseg4ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u64m1_tumu(v0, v1, v2, v3, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_tumu( @@ -1789,7 +1789,7 @@ void test_vloxseg4ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_mu( @@ -1806,7 +1806,7 @@ void test_vloxseg4ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_mu( @@ -1823,7 +1823,7 @@ void test_vloxseg4ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - 
return vloxseg4ei64_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_mu( @@ -1840,7 +1840,7 @@ void test_vloxseg4ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_mu( @@ -1857,7 +1857,7 @@ void test_vloxseg4ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_mu( @@ -1874,7 +1874,7 @@ void test_vloxseg4ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t 
mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_mu( @@ -1891,7 +1891,7 @@ void test_vloxseg4ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_mu( @@ -1908,7 +1908,7 @@ void test_vloxseg4ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_mu( @@ -1925,7 +1925,7 @@ void test_vloxseg4ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, 
vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_mu( @@ -1942,7 +1942,7 @@ void test_vloxseg4ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_mu( @@ -1959,7 +1959,7 @@ void test_vloxseg4ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_mu( @@ -1976,7 +1976,7 @@ void test_vloxseg4ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_mu( @@ -1993,7 +1993,7 @@ void test_vloxseg4ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_mu( @@ -2010,7 +2010,7 @@ void test_vloxseg4ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i8m1_mu(v0, 
v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_mu( @@ -2027,7 +2027,7 @@ void test_vloxseg4ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_mu( @@ -2044,7 +2044,7 @@ void test_vloxseg4ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_mu( @@ -2061,7 +2061,7 @@ void test_vloxseg4ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m1_mu(v0, 
v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_mu( @@ -2078,7 +2078,7 @@ void test_vloxseg4ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_mu( @@ -2095,7 +2095,7 @@ void test_vloxseg4ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_mu( @@ -2112,7 +2112,7 @@ void test_vloxseg4ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, 
vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_mu( @@ -2129,7 +2129,7 @@ void test_vloxseg4ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_mu( @@ -2146,7 +2146,7 @@ void test_vloxseg4ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_mu( @@ -2163,7 +2163,7 @@ void test_vloxseg4ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t 
*v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_mu( @@ -2180,7 +2180,7 @@ void test_vloxseg4ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_mu( @@ -2197,7 +2197,7 @@ void test_vloxseg4ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_mu( @@ -2214,7 +2214,7 @@ void test_vloxseg4ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, 
vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_mu( @@ -2231,7 +2231,7 @@ void test_vloxseg4ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_mu( @@ -2248,7 +2248,7 @@ void test_vloxseg4ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_mu( @@ -2265,7 +2265,7 @@ void test_vloxseg4ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_mu( @@ -2282,7 +2282,7 @@ void test_vloxseg4ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_mu( @@ -2299,7 +2299,7 @@ void test_vloxseg4ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, 
vl); + return __riscv_vloxseg4ei64_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_mu( @@ -2316,7 +2316,7 @@ void test_vloxseg4ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_mu( @@ -2333,7 +2333,7 @@ void test_vloxseg4ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_mu( @@ -2350,7 +2350,7 @@ void test_vloxseg4ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, 
vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_mu( @@ -2367,7 +2367,7 @@ void test_vloxseg4ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_mu( @@ -2384,6 +2384,6 @@ void test_vloxseg4ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei64_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei8.c index 5eb8b57dbc00..baf8c560b740 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg4ei8.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_tu( @@ -38,7 +38,7 @@ void test_vloxseg4ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_tu( @@ -55,7 +55,7 @@ void test_vloxseg4ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); 
+ return __riscv_vloxseg4ei8_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_tu( @@ -72,7 +72,7 @@ void test_vloxseg4ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_tu( @@ -89,7 +89,7 @@ void test_vloxseg4ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_tu( @@ -106,7 +106,7 @@ void test_vloxseg4ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_tu( @@ -123,7 +123,7 @@ void test_vloxseg4ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_tu( @@ -140,7 +140,7 @@ void test_vloxseg4ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_tu( @@ -157,7 +157,7 @@ void test_vloxseg4ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return 
vloxseg4ei8_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_tu( @@ -174,7 +174,7 @@ void test_vloxseg4ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_tu( @@ -191,7 +191,7 @@ void test_vloxseg4ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_tu( @@ -208,7 +208,7 @@ void test_vloxseg4ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return 
vloxseg4ei8_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_tu( @@ -225,7 +225,7 @@ void test_vloxseg4ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_tu( @@ -242,7 +242,7 @@ void test_vloxseg4ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_tu( @@ -259,7 +259,7 @@ void test_vloxseg4ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return 
vloxseg4ei8_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_tu( @@ -276,7 +276,7 @@ void test_vloxseg4ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_tu( @@ -293,7 +293,7 @@ void test_vloxseg4ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_tu( @@ -310,7 +310,7 @@ void test_vloxseg4ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t 
vl) { - return vloxseg4ei8_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_tu( @@ -327,7 +327,7 @@ void test_vloxseg4ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_tu( @@ -344,7 +344,7 @@ void test_vloxseg4ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_tu( @@ -361,7 +361,7 @@ void test_vloxseg4ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t 
bindex, size_t vl) { - return vloxseg4ei8_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_tu( @@ -378,7 +378,7 @@ void test_vloxseg4ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_tu( @@ -395,7 +395,7 @@ void test_vloxseg4ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_tu( @@ -412,7 +412,7 @@ void test_vloxseg4ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, 
vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_tu( @@ -429,7 +429,7 @@ void test_vloxseg4ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_tu( @@ -446,7 +446,7 @@ void test_vloxseg4ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_tu( @@ -463,7 +463,7 @@ void test_vloxseg4ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const 
uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_tu( @@ -480,7 +480,7 @@ void test_vloxseg4ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_tu( @@ -497,7 +497,7 @@ void test_vloxseg4ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_tu( @@ -514,7 +514,7 @@ void test_vloxseg4ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, 
vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_tu( @@ -531,7 +531,7 @@ void test_vloxseg4ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_tu( @@ -548,7 +548,7 @@ void test_vloxseg4ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_tu( @@ -565,7 +565,7 @@ void test_vloxseg4ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, 
vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_tu( @@ -582,7 +582,7 @@ void test_vloxseg4ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_tu( @@ -599,7 +599,7 @@ void test_vloxseg4ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_tu( @@ -616,7 +616,7 @@ void test_vloxseg4ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, 
vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_tu( @@ -633,7 +633,7 @@ void test_vloxseg4ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_tum( @@ -650,7 +650,7 @@ void test_vloxseg4ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_tum( @@ -667,7 +667,7 @@ void test_vloxseg4ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // 
void test_vloxseg4ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_tum( @@ -684,7 +684,7 @@ void test_vloxseg4ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_tum( @@ -701,7 +701,7 @@ void test_vloxseg4ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei8_v_f32mf2_tum( @@ -718,7 +718,7 @@ void test_vloxseg4ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_tum( @@ -735,7 +735,7 @@ void test_vloxseg4ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_tum( @@ -752,7 +752,7 @@ void test_vloxseg4ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + 
return __riscv_vloxseg4ei8_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_tum( @@ -769,7 +769,7 @@ void test_vloxseg4ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_tum( @@ -786,7 +786,7 @@ void test_vloxseg4ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_tum( @@ -803,7 +803,7 @@ void test_vloxseg4ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) 
{ - return vloxseg4ei8_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_tum( @@ -820,7 +820,7 @@ void test_vloxseg4ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_tum( @@ -837,7 +837,7 @@ void test_vloxseg4ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_tum( @@ -854,7 +854,7 @@ void test_vloxseg4ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t 
maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_tum( @@ -871,7 +871,7 @@ void test_vloxseg4ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_tum( @@ -888,7 +888,7 @@ void test_vloxseg4ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_tum( @@ -905,7 +905,7 @@ void test_vloxseg4ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t 
*v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_tum( @@ -922,7 +922,7 @@ void test_vloxseg4ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_tum( @@ -939,7 +939,7 @@ void test_vloxseg4ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_tum( @@ -956,7 +956,7 @@ void test_vloxseg4ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_tum( @@ -973,7 +973,7 @@ void test_vloxseg4ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_tum( @@ -990,7 +990,7 @@ void test_vloxseg4ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei8_v_i64m1_tum( @@ -1007,7 +1007,7 @@ void test_vloxseg4ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_tum( @@ -1024,7 +1024,7 @@ void test_vloxseg4ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_tum( @@ -1041,7 +1041,7 @@ void test_vloxseg4ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vloxseg4ei8_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_tum( @@ -1058,7 +1058,7 @@ void test_vloxseg4ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_tum( @@ -1075,7 +1075,7 @@ void test_vloxseg4ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_tum( @@ -1092,7 +1092,7 @@ void test_vloxseg4ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return 
vloxseg4ei8_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_tum( @@ -1109,7 +1109,7 @@ void test_vloxseg4ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_tum( @@ -1126,7 +1126,7 @@ void test_vloxseg4ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_tum( @@ -1143,7 +1143,7 @@ void test_vloxseg4ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, 
vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_tum( @@ -1160,7 +1160,7 @@ void test_vloxseg4ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_tum( @@ -1177,7 +1177,7 @@ void test_vloxseg4ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_tum( @@ -1194,7 +1194,7 @@ void test_vloxseg4ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_tum( @@ -1211,7 +1211,7 @@ void test_vloxseg4ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_tum( @@ -1228,7 +1228,7 @@ void test_vloxseg4ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_tum( 
@@ -1245,7 +1245,7 @@ void test_vloxseg4ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_tum( @@ -1262,7 +1262,7 @@ void test_vloxseg4ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_tumu( @@ -1279,7 +1279,7 @@ void test_vloxseg4ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vloxseg4ei8_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_tumu( @@ -1296,7 +1296,7 @@ void test_vloxseg4ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_tumu( @@ -1313,7 +1313,7 @@ void test_vloxseg4ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_tumu( @@ -1330,7 +1330,7 @@ void test_vloxseg4ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const 
_Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_tumu( @@ -1347,7 +1347,7 @@ void test_vloxseg4ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_tumu( @@ -1364,7 +1364,7 @@ void test_vloxseg4ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_tumu( @@ -1381,7 +1381,7 @@ void test_vloxseg4ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32m2_tumu(vfloat32m2_t *v0, 
vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_tumu( @@ -1398,7 +1398,7 @@ void test_vloxseg4ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_tumu( @@ -1415,7 +1415,7 @@ void test_vloxseg4ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_tumu( @@ -1432,7 +1432,7 @@ void 
test_vloxseg4ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_tumu( @@ -1449,7 +1449,7 @@ void test_vloxseg4ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_tumu( @@ -1466,7 +1466,7 @@ void test_vloxseg4ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_tumu( @@ -1483,7 +1483,7 @@ void test_vloxseg4ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_tumu( @@ -1500,7 +1500,7 @@ void test_vloxseg4ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_tumu( @@ -1517,7 +1517,7 @@ void test_vloxseg4ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + 
return __riscv_vloxseg4ei8_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_tumu( @@ -1534,7 +1534,7 @@ void test_vloxseg4ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_tumu( @@ -1551,7 +1551,7 @@ void test_vloxseg4ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_tumu( @@ -1568,7 +1568,7 @@ void test_vloxseg4ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) 
{ - return vloxseg4ei8_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_tumu( @@ -1585,7 +1585,7 @@ void test_vloxseg4ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_tumu( @@ -1602,7 +1602,7 @@ void test_vloxseg4ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_tumu( @@ -1619,7 +1619,7 @@ void test_vloxseg4ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t 
maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_tumu( @@ -1636,7 +1636,7 @@ void test_vloxseg4ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_tumu( @@ -1653,7 +1653,7 @@ void test_vloxseg4ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_tumu( @@ -1670,7 +1670,7 @@ void test_vloxseg4ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void 
test_vloxseg4ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_tumu( @@ -1687,7 +1687,7 @@ void test_vloxseg4ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_tumu( @@ -1704,7 +1704,7 @@ void test_vloxseg4ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_tumu( 
@@ -1721,7 +1721,7 @@ void test_vloxseg4ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_tumu( @@ -1738,7 +1738,7 @@ void test_vloxseg4ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_tumu( @@ -1755,7 +1755,7 @@ void test_vloxseg4ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16mf4_tumu(v0, v1, v2, v3, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_tumu( @@ -1772,7 +1772,7 @@ void test_vloxseg4ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_tumu( @@ -1789,7 +1789,7 @@ void test_vloxseg4ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_tumu( @@ -1806,7 +1806,7 @@ void test_vloxseg4ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return 
vloxseg4ei8_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_tumu( @@ -1823,7 +1823,7 @@ void test_vloxseg4ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_tumu( @@ -1840,7 +1840,7 @@ void test_vloxseg4ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_tumu( @@ -1857,7 +1857,7 @@ void test_vloxseg4ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, 
vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_tumu( @@ -1874,7 +1874,7 @@ void test_vloxseg4ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_tumu( @@ -1891,7 +1891,7 @@ void test_vloxseg4ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_mu( @@ -1908,7 +1908,7 @@ void test_vloxseg4ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // 
CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_mu( @@ -1925,7 +1925,7 @@ void test_vloxseg4ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_mu( @@ -1942,7 +1942,7 @@ void test_vloxseg4ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_mu( @@ -1959,7 +1959,7 @@ void test_vloxseg4ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_mu( @@ -1976,7 +1976,7 @@ void test_vloxseg4ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_mu( @@ -1993,7 +1993,7 @@ void test_vloxseg4ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_mu( @@ -2010,7 +2010,7 @@ void test_vloxseg4ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_mu( @@ -2027,7 +2027,7 @@ void test_vloxseg4ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_mu( @@ -2044,7 +2044,7 @@ void test_vloxseg4ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const 
double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_mu( @@ -2061,7 +2061,7 @@ void test_vloxseg4ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_mu( @@ -2078,7 +2078,7 @@ void test_vloxseg4ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_mu( @@ -2095,7 +2095,7 @@ void test_vloxseg4ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, 
vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_mu( @@ -2112,7 +2112,7 @@ void test_vloxseg4ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_mu( @@ -2129,7 +2129,7 @@ void test_vloxseg4ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_mu( @@ -2146,7 +2146,7 @@ void test_vloxseg4ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16mf4_mu(vint16mf4_t *v0, 
vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_mu( @@ -2163,7 +2163,7 @@ void test_vloxseg4ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_mu( @@ -2180,7 +2180,7 @@ void test_vloxseg4ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_mu( @@ -2197,7 +2197,7 @@ void test_vloxseg4ei8_v_i16m1_mu(vint16m1_t *v0, 
vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_mu( @@ -2214,7 +2214,7 @@ void test_vloxseg4ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_mu( @@ -2231,7 +2231,7 @@ void test_vloxseg4ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg4ei8_v_i32m2_mu( @@ -2248,7 +2248,7 @@ void test_vloxseg4ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_mu( @@ -2265,7 +2265,7 @@ void test_vloxseg4ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_mu( @@ -2282,7 +2282,7 @@ void test_vloxseg4ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_i64m2_mu(v0, v1, v2, v3, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_mu( @@ -2299,7 +2299,7 @@ void test_vloxseg4ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_mu( @@ -2316,7 +2316,7 @@ void test_vloxseg4ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_mu( @@ -2333,7 +2333,7 @@ void test_vloxseg4ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8mf2_mu(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_mu( @@ -2350,7 +2350,7 @@ void test_vloxseg4ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_mu( @@ -2367,7 +2367,7 @@ void test_vloxseg4ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_mu( @@ -2384,7 +2384,7 @@ void test_vloxseg4ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const 
uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_mu( @@ -2401,7 +2401,7 @@ void test_vloxseg4ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_mu( @@ -2418,7 +2418,7 @@ void test_vloxseg4ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_mu( @@ -2435,7 +2435,7 @@ void test_vloxseg4ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, 
vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_mu( @@ -2452,7 +2452,7 @@ void test_vloxseg4ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_mu( @@ -2469,7 +2469,7 @@ void test_vloxseg4ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_mu( @@ -2486,7 +2486,7 @@ void test_vloxseg4ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_mu( @@ -2503,7 +2503,7 @@ void test_vloxseg4ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_mu( @@ -2520,6 +2520,6 @@ void test_vloxseg4ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg4ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vloxseg4ei8_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei16.c index daa3e42cdbb4..7f5ce1c512ca 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei16.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_tu( @@ -42,7 +42,7 @@ void test_vloxseg5ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_tu( @@ -61,7 +61,7 @@ void test_vloxseg5ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_tu( @@ -80,7 +80,7 @@ void test_vloxseg5ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_tu( @@ -99,7 +99,7 @@ void test_vloxseg5ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + 
return __riscv_vloxseg5ei16_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_tu( @@ -118,7 +118,7 @@ void test_vloxseg5ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_tu( @@ -137,7 +137,7 @@ void test_vloxseg5ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_tu( @@ -156,7 +156,7 @@ void test_vloxseg5ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t 
maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_tu( @@ -175,7 +175,7 @@ void test_vloxseg5ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_tu( @@ -194,7 +194,7 @@ void test_vloxseg5ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_tu( @@ -213,7 +213,7 @@ void test_vloxseg5ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t 
*v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_tu( @@ -232,7 +232,7 @@ void test_vloxseg5ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_tu( @@ -251,7 +251,7 @@ void test_vloxseg5ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vloxseg5ei16_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_tu( @@ -270,7 +270,7 @@ void test_vloxseg5ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_tu( @@ -289,7 +289,7 @@ void test_vloxseg5ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_tu( @@ -308,7 +308,7 @@ void test_vloxseg5ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t 
maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_tu( @@ -327,7 +327,7 @@ void test_vloxseg5ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_tu( @@ -346,7 +346,7 @@ void test_vloxseg5ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_tu( @@ -365,7 +365,7 @@ void 
test_vloxseg5ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_tu( @@ -384,7 +384,7 @@ void test_vloxseg5ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_tu( @@ -403,7 +403,7 @@ void test_vloxseg5ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_tu( @@ -422,7 +422,7 @@ void test_vloxseg5ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_tu( @@ -441,7 +441,7 @@ void test_vloxseg5ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_tu( @@ -460,7 +460,7 @@ void test_vloxseg5ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, 
vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_tu( @@ -479,7 +479,7 @@ void test_vloxseg5ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_tu( @@ -498,7 +498,7 @@ void test_vloxseg5ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_tum( @@ -517,7 +517,7 @@ void test_vloxseg5ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_tum( @@ -536,7 +536,7 @@ void test_vloxseg5ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_tum( @@ -555,7 +555,7 @@ void test_vloxseg5ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, 
vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_tum( @@ -574,7 +574,7 @@ void test_vloxseg5ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_tum( @@ -593,7 +593,7 @@ void test_vloxseg5ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_tum( @@ -612,7 +612,7 @@ void test_vloxseg5ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_tum( @@ -631,7 +631,7 @@ void test_vloxseg5ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_tum( @@ -650,7 +650,7 @@ void test_vloxseg5ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, 
vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_tum( @@ -669,7 +669,7 @@ void test_vloxseg5ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_tum( @@ -688,7 +688,7 @@ void test_vloxseg5ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_tum( @@ -707,7 +707,7 @@ void 
test_vloxseg5ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_tum( @@ -726,7 +726,7 @@ void test_vloxseg5ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_tum( @@ -745,7 +745,7 @@ void test_vloxseg5ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return 
vloxseg5ei16_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_tum( @@ -764,7 +764,7 @@ void test_vloxseg5ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_tum( @@ -783,7 +783,7 @@ void test_vloxseg5ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_tum( @@ -802,7 +802,7 @@ void test_vloxseg5ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // 
CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_tum( @@ -821,7 +821,7 @@ void test_vloxseg5ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_tum( @@ -840,7 +840,7 @@ void test_vloxseg5ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_tum( @@ -859,7 +859,7 @@ void test_vloxseg5ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_tum( @@ -878,7 +878,7 @@ void test_vloxseg5ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_tum( @@ -897,7 +897,7 @@ void test_vloxseg5ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t 
*v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_tum( @@ -916,7 +916,7 @@ void test_vloxseg5ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_tum( @@ -935,7 +935,7 @@ void test_vloxseg5ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vloxseg5ei16_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_tum( @@ -954,7 +954,7 @@ void test_vloxseg5ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_tum( @@ -973,7 +973,7 @@ void test_vloxseg5ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_tum( @@ -992,7 +992,7 @@ void test_vloxseg5ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, 
vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_tumu( @@ -1011,7 +1011,7 @@ void test_vloxseg5ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_tumu( @@ -1030,7 +1030,7 @@ void test_vloxseg5ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + 
return __riscv_vloxseg5ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_tumu( @@ -1049,7 +1049,7 @@ void test_vloxseg5ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_tumu( @@ -1068,7 +1068,7 @@ void test_vloxseg5ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_tumu( @@ -1087,7 +1087,7 @@ void test_vloxseg5ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void 
test_vloxseg5ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_tumu( @@ -1106,7 +1106,7 @@ void test_vloxseg5ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_tumu( @@ -1125,7 +1125,7 @@ void test_vloxseg5ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_tumu( @@ -1144,7 +1144,7 @@ void test_vloxseg5ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_tumu( @@ -1163,7 +1163,7 @@ void test_vloxseg5ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_tumu( @@ -1182,7 +1182,7 @@ void test_vloxseg5ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t 
*v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_tumu( @@ -1201,7 +1201,7 @@ void test_vloxseg5ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_tumu( @@ -1220,7 +1220,7 @@ void test_vloxseg5ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vloxseg5ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_tumu( @@ -1239,7 +1239,7 @@ void test_vloxseg5ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_tumu( @@ -1258,7 +1258,7 @@ void test_vloxseg5ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_tumu( @@ -1277,7 +1277,7 @@ void test_vloxseg5ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, 
vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_tumu( @@ -1296,7 +1296,7 @@ void test_vloxseg5ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_tumu( @@ -1315,7 +1315,7 @@ void test_vloxseg5ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_tumu( @@ -1334,7 +1334,7 @@ void test_vloxseg5ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_tumu( @@ -1353,7 +1353,7 @@ void test_vloxseg5ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_tumu( @@ -1372,7 +1372,7 @@ void test_vloxseg5ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t 
maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_tumu( @@ -1391,7 +1391,7 @@ void test_vloxseg5ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_tumu( @@ -1410,7 +1410,7 @@ void test_vloxseg5ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_tumu( @@ -1429,7 +1429,7 @@ void test_vloxseg5ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_tumu( @@ -1448,7 +1448,7 @@ void test_vloxseg5ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_tumu( @@ -1467,7 +1467,7 @@ void test_vloxseg5ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, 
vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_tumu( @@ -1486,7 +1486,7 @@ void test_vloxseg5ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_mu( @@ -1505,7 +1505,7 @@ void test_vloxseg5ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_mu( @@ -1524,7 +1524,7 @@ void test_vloxseg5ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_mu( @@ -1543,7 +1543,7 @@ void test_vloxseg5ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_mu( @@ -1562,7 +1562,7 @@ void test_vloxseg5ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t 
mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_mu( @@ -1581,7 +1581,7 @@ void test_vloxseg5ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_mu( @@ -1600,7 +1600,7 @@ void test_vloxseg5ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_mu( @@ -1619,7 +1619,7 @@ void test_vloxseg5ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_mu( @@ -1638,7 +1638,7 @@ void test_vloxseg5ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_mu( @@ -1657,7 +1657,7 @@ void test_vloxseg5ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, 
vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_mu( @@ -1676,7 +1676,7 @@ void test_vloxseg5ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_mu( @@ -1695,7 +1695,7 @@ void test_vloxseg5ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_mu( @@ -1714,7 +1714,7 
@@ void test_vloxseg5ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_mu( @@ -1733,7 +1733,7 @@ void test_vloxseg5ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_mu( @@ -1752,7 +1752,7 @@ void test_vloxseg5ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg5ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_mu( @@ -1771,7 +1771,7 @@ void test_vloxseg5ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_mu( @@ -1790,7 +1790,7 @@ void test_vloxseg5ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_mu( @@ -1809,7 +1809,7 @@ void test_vloxseg5ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: 
ret void // void test_vloxseg5ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_mu( @@ -1828,7 +1828,7 @@ void test_vloxseg5ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_mu( @@ -1847,7 +1847,7 @@ void test_vloxseg5ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_mu( @@ -1866,7 +1866,7 @@ void test_vloxseg5ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_mu( @@ -1885,7 +1885,7 @@ void test_vloxseg5ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_mu( @@ -1904,7 +1904,7 @@ void test_vloxseg5ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t 
*v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_mu( @@ -1923,7 +1923,7 @@ void test_vloxseg5ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_mu( @@ -1942,7 +1942,7 @@ void test_vloxseg5ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vloxseg5ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_mu( @@ -1961,7 +1961,7 @@ void test_vloxseg5ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_mu( @@ -1980,6 +1980,6 @@ void test_vloxseg5ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei16_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei32.c index 9de419110b8d..cdb2eafcc583 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei32.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_tu( @@ -42,7 +42,7 @@ void test_vloxseg5ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_tu( @@ -61,7 +61,7 @@ void test_vloxseg5ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, 
vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_tu( @@ -80,7 +80,7 @@ void test_vloxseg5ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_tu( @@ -99,7 +99,7 @@ void test_vloxseg5ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_tu( @@ -118,7 +118,7 @@ void 
test_vloxseg5ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_tu( @@ -137,7 +137,7 @@ void test_vloxseg5ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_tu( @@ -156,7 +156,7 @@ void test_vloxseg5ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_tu( @@ -175,7 +175,7 @@ void test_vloxseg5ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_tu( @@ -194,7 +194,7 @@ void test_vloxseg5ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_tu( @@ -213,7 +213,7 @@ void test_vloxseg5ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, 
vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_tu( @@ -232,7 +232,7 @@ void test_vloxseg5ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_tu( @@ -251,7 +251,7 @@ void test_vloxseg5ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_tu( @@ -270,7 +270,7 @@ void 
test_vloxseg5ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_tu( @@ -289,7 +289,7 @@ void test_vloxseg5ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_tu( @@ -308,7 +308,7 @@ void test_vloxseg5ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_tu( @@ -327,7 +327,7 @@ void test_vloxseg5ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_tu( @@ -346,7 +346,7 @@ void test_vloxseg5ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_tu( @@ -365,7 +365,7 @@ void test_vloxseg5ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t 
maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_tu( @@ -384,7 +384,7 @@ void test_vloxseg5ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_tu( @@ -403,7 +403,7 @@ void test_vloxseg5ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_tu( @@ 
-422,7 +422,7 @@ void test_vloxseg5ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_tu( @@ -441,7 +441,7 @@ void test_vloxseg5ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_tu( @@ -460,7 +460,7 @@ void test_vloxseg5ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u32mf2_tu(v0, v1, 
v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_tu( @@ -479,7 +479,7 @@ void test_vloxseg5ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_tu( @@ -498,7 +498,7 @@ void test_vloxseg5ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_tum( @@ -517,7 +517,7 @@ void test_vloxseg5ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, 
vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_tum( @@ -536,7 +536,7 @@ void test_vloxseg5ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_tum( @@ -555,7 +555,7 @@ void test_vloxseg5ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, 
vl); + return __riscv_vloxseg5ei32_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_tum( @@ -574,7 +574,7 @@ void test_vloxseg5ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_tum( @@ -593,7 +593,7 @@ void test_vloxseg5ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_tum( @@ -612,7 +612,7 @@ void test_vloxseg5ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f64m1_tum(vfloat64m1_t *v0, 
vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_tum( @@ -631,7 +631,7 @@ void test_vloxseg5ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_tum( @@ -650,7 +650,7 @@ void test_vloxseg5ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf4_tum(v0, 
v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_tum( @@ -669,7 +669,7 @@ void test_vloxseg5ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_tum( @@ -688,7 +688,7 @@ void test_vloxseg5ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_tum( @@ -707,7 +707,7 @@ void test_vloxseg5ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t 
maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_tum( @@ -726,7 +726,7 @@ void test_vloxseg5ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_tum( @@ -745,7 +745,7 @@ void test_vloxseg5ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_tum( @@ -764,7 +764,7 @@ void test_vloxseg5ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_tum( @@ -783,7 +783,7 @@ void test_vloxseg5ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_tum( @@ -802,7 +802,7 @@ void test_vloxseg5ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const 
int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_tum( @@ -821,7 +821,7 @@ void test_vloxseg5ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_tum( @@ -840,7 +840,7 @@ void test_vloxseg5ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_tum( @@ -859,7 +859,7 @@ void 
test_vloxseg5ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_tum( @@ -878,7 +878,7 @@ void test_vloxseg5ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_tum( @@ -897,7 +897,7 @@ void test_vloxseg5ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return 
vloxseg5ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_tum( @@ -916,7 +916,7 @@ void test_vloxseg5ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_tum( @@ -935,7 +935,7 @@ void test_vloxseg5ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_tum( @@ -954,7 +954,7 @@ void test_vloxseg5ei32_v_u16m1_tum(vuint16m1_t *v0, 
vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_tum( @@ -973,7 +973,7 @@ void test_vloxseg5ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_tum( @@ -992,7 +992,7 @@ void test_vloxseg5ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u64m1_tum(v0, v1, 
v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_tumu( @@ -1011,7 +1011,7 @@ void test_vloxseg5ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_tumu( @@ -1030,7 +1030,7 @@ void test_vloxseg5ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_tumu( @@ -1049,7 +1049,7 @@ void 
test_vloxseg5ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_tumu( @@ -1068,7 +1068,7 @@ void test_vloxseg5ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_tumu( @@ -1087,7 +1087,7 @@ void test_vloxseg5ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const 
float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_tumu( @@ -1106,7 +1106,7 @@ void test_vloxseg5ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_tumu( @@ -1125,7 +1125,7 @@ void test_vloxseg5ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_tumu( @@ -1144,7 +1144,7 @@ void 
test_vloxseg5ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_tumu( @@ -1163,7 +1163,7 @@ void test_vloxseg5ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_tumu( @@ -1182,7 +1182,7 @@ void test_vloxseg5ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_i8m1_tumu(v0, v1, v2, 
v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_tumu( @@ -1201,7 +1201,7 @@ void test_vloxseg5ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_tumu( @@ -1220,7 +1220,7 @@ void test_vloxseg5ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_tumu( @@ -1239,7 +1239,7 @@ void test_vloxseg5ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_tumu( @@ -1258,7 +1258,7 @@ void test_vloxseg5ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_tumu( @@ -1277,7 +1277,7 @@ void test_vloxseg5ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_tumu( @@ -1296,7 +1296,7 @@ void test_vloxseg5ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_tumu( @@ -1315,7 +1315,7 @@ void test_vloxseg5ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_tumu( @@ -1334,7 +1334,7 @@ void test_vloxseg5ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void 
test_vloxseg5ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_tumu( @@ -1353,7 +1353,7 @@ void test_vloxseg5ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_tumu( @@ -1372,7 +1372,7 @@ void test_vloxseg5ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, 
bindex, vl); + return __riscv_vloxseg5ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_tumu( @@ -1391,7 +1391,7 @@ void test_vloxseg5ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_tumu( @@ -1410,7 +1410,7 @@ void test_vloxseg5ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_tumu( @@ -1429,7 +1429,7 @@ void test_vloxseg5ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg5ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_tumu( @@ -1448,7 +1448,7 @@ void test_vloxseg5ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_tumu( @@ -1467,7 +1467,7 @@ void test_vloxseg5ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_tumu( @@ -1486,7 +1486,7 @@ void test_vloxseg5ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_mu( @@ -1505,7 +1505,7 @@ void test_vloxseg5ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_mu( @@ -1524,7 +1524,7 @@ void test_vloxseg5ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg5ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_mu( @@ -1543,7 +1543,7 @@ void test_vloxseg5ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_mu( @@ -1562,7 +1562,7 @@ void test_vloxseg5ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_mu( @@ -1581,7 +1581,7 @@ void test_vloxseg5ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_mu( @@ -1600,7 +1600,7 @@ void test_vloxseg5ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_mu( @@ -1619,7 +1619,7 @@ void test_vloxseg5ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg5ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_mu( @@ -1638,7 +1638,7 @@ void test_vloxseg5ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_mu( @@ -1657,7 +1657,7 @@ void test_vloxseg5ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vloxseg5ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_mu( @@ -1676,7 +1676,7 @@ void test_vloxseg5ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_mu( @@ -1695,7 +1695,7 @@ void test_vloxseg5ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_mu( @@ -1714,7 +1714,7 @@ void test_vloxseg5ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t 
mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_mu( @@ -1733,7 +1733,7 @@ void test_vloxseg5ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_mu( @@ -1752,7 +1752,7 @@ void test_vloxseg5ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_mu( @@ -1771,7 +1771,7 @@ void test_vloxseg5ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_mu( @@ -1790,7 +1790,7 @@ void test_vloxseg5ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_mu( @@ -1809,7 +1809,7 @@ void test_vloxseg5ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, 
vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_mu( @@ -1828,7 +1828,7 @@ void test_vloxseg5ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_mu( @@ -1847,7 +1847,7 @@ void test_vloxseg5ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_mu( @@ -1866,7 +1866,7 @@ 
void test_vloxseg5ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_mu( @@ -1885,7 +1885,7 @@ void test_vloxseg5ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_mu( @@ -1904,7 +1904,7 @@ void test_vloxseg5ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { 
- return vloxseg5ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_mu( @@ -1923,7 +1923,7 @@ void test_vloxseg5ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_mu( @@ -1942,7 +1942,7 @@ void test_vloxseg5ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_mu( @@ -1961,7 +1961,7 @@ void test_vloxseg5ei32_v_u32mf2_mu(vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_mu( @@ -1980,6 +1980,6 @@ void test_vloxseg5ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei32_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei64.c index ae47fde5bc59..69261659bbac 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei64.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, 
vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_tu( @@ -42,7 +42,7 @@ void test_vloxseg5ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_tu( @@ -61,7 +61,7 @@ void test_vloxseg5ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_tu( @@ -80,7 +80,7 @@ void test_vloxseg5ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_tu( @@ -99,7 +99,7 @@ void test_vloxseg5ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_tu( @@ -118,7 +118,7 @@ void test_vloxseg5ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, 
vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_tu( @@ -137,7 +137,7 @@ void test_vloxseg5ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_tu( @@ -156,7 +156,7 @@ void test_vloxseg5ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_tu( @@ -175,7 +175,7 @@ void test_vloxseg5ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_tu( @@ -194,7 +194,7 @@ void test_vloxseg5ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_tu( @@ -213,7 +213,7 @@ void test_vloxseg5ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_tu( @@ -232,7 +232,7 @@ void test_vloxseg5ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_tu( @@ -251,7 +251,7 @@ void test_vloxseg5ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_tu( @@ -270,7 +270,7 @@ void test_vloxseg5ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const 
int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_tu( @@ -289,7 +289,7 @@ void test_vloxseg5ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_tu( @@ -308,7 +308,7 @@ void test_vloxseg5ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_tu( @@ -327,7 +327,7 @@ void test_vloxseg5ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg5ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_tu( @@ -346,7 +346,7 @@ void test_vloxseg5ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_tu( @@ -365,7 +365,7 @@ void test_vloxseg5ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_tu( @@ -384,7 +384,7 @@ void test_vloxseg5ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_tu( @@ -403,7 +403,7 @@ void test_vloxseg5ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_tu( @@ -422,7 +422,7 @@ void test_vloxseg5ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t 
maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_tu( @@ -441,7 +441,7 @@ void test_vloxseg5ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_tu( @@ -460,7 +460,7 @@ void test_vloxseg5ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_tu( @@ -479,7 +479,7 @@ void test_vloxseg5ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t 
*v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_tu( @@ -498,7 +498,7 @@ void test_vloxseg5ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_tum( @@ -517,7 +517,7 @@ void test_vloxseg5ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_tum( @@ -536,7 +536,7 @@ void test_vloxseg5ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_tum( @@ -555,7 +555,7 @@ void test_vloxseg5ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_tum( @@ -574,7 +574,7 @@ void test_vloxseg5ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void 
test_vloxseg5ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_tum( @@ -593,7 +593,7 @@ void test_vloxseg5ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_tum( @@ -612,7 +612,7 @@ void test_vloxseg5ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_tum( @@ -631,7 +631,7 @@ void test_vloxseg5ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_tum( @@ -650,7 +650,7 @@ void test_vloxseg5ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_tum( @@ -669,7 +669,7 @@ void test_vloxseg5ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t 
*v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_tum( @@ -688,7 +688,7 @@ void test_vloxseg5ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_tum( @@ -707,7 +707,7 @@ void test_vloxseg5ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_tum( @@ -726,7 +726,7 @@ void test_vloxseg5ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_tum( @@ -745,7 +745,7 @@ void test_vloxseg5ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_tum( @@ -764,7 +764,7 @@ void test_vloxseg5ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, 
vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_tum( @@ -783,7 +783,7 @@ void test_vloxseg5ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_tum( @@ -802,7 +802,7 @@ void test_vloxseg5ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_tum( @@ -821,7 +821,7 @@ void test_vloxseg5ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_tum( @@ -840,7 +840,7 @@ void test_vloxseg5ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_tum( @@ -859,7 +859,7 @@ void test_vloxseg5ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t 
maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_tum( @@ -878,7 +878,7 @@ void test_vloxseg5ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_tum( @@ -897,7 +897,7 @@ void test_vloxseg5ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_tum( @@ -916,7 +916,7 @@ 
void test_vloxseg5ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_tum( @@ -935,7 +935,7 @@ void test_vloxseg5ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_tum( @@ -954,7 +954,7 @@ void test_vloxseg5ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t 
bindex, size_t vl) { - return vloxseg5ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_tum( @@ -973,7 +973,7 @@ void test_vloxseg5ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_tum( @@ -992,7 +992,7 @@ void test_vloxseg5ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_tumu( @@ -1011,7 +1011,7 @@ void 
test_vloxseg5ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_tumu( @@ -1030,7 +1030,7 @@ void test_vloxseg5ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_tumu( @@ -1049,7 +1049,7 @@ void test_vloxseg5ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t 
maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_tumu( @@ -1068,7 +1068,7 @@ void test_vloxseg5ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_tumu( @@ -1087,7 +1087,7 @@ void test_vloxseg5ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg5ei64_v_f64m1_tumu( @@ -1106,7 +1106,7 @@ void test_vloxseg5ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_tumu( @@ -1125,7 +1125,7 @@ void test_vloxseg5ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_tumu( @@ -1144,7 +1144,7 @@ void test_vloxseg5ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const 
int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_tumu( @@ -1163,7 +1163,7 @@ void test_vloxseg5ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_tumu( @@ -1182,7 +1182,7 @@ void test_vloxseg5ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_tumu( @@ -1201,7 +1201,7 @@ void 
test_vloxseg5ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_tumu( @@ -1220,7 +1220,7 @@ void test_vloxseg5ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_tumu( @@ -1239,7 +1239,7 @@ void test_vloxseg5ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - 
return vloxseg5ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_tumu( @@ -1258,7 +1258,7 @@ void test_vloxseg5ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_tumu( @@ -1277,7 +1277,7 @@ void test_vloxseg5ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_tumu( @@ -1296,7 +1296,7 @@ void test_vloxseg5ei64_v_i32m1_tumu(vint32m1_t *v0, 
vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_tumu( @@ -1315,7 +1315,7 @@ void test_vloxseg5ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_tumu( @@ -1334,7 +1334,7 @@ void test_vloxseg5ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_tumu( @@ -1353,7 +1353,7 @@ void test_vloxseg5ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_tumu( @@ -1372,7 +1372,7 @@ void test_vloxseg5ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_tumu( @@ -1391,7 +1391,7 @@ void test_vloxseg5ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void 
test_vloxseg5ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_tumu( @@ -1410,7 +1410,7 @@ void test_vloxseg5ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_tumu( @@ -1429,7 +1429,7 @@ void test_vloxseg5ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_tumu( @@ -1448,7 +1448,7 @@ void test_vloxseg5ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_tumu( @@ -1467,7 +1467,7 @@ void test_vloxseg5ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_tumu( @@ -1486,7 +1486,7 @@ void test_vloxseg5ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void 
// void test_vloxseg5ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_mu( @@ -1505,7 +1505,7 @@ void test_vloxseg5ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_mu( @@ -1524,7 +1524,7 @@ void test_vloxseg5ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_mu( @@ -1543,7 +1543,7 @@ void test_vloxseg5ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_mu( @@ -1562,7 +1562,7 @@ void test_vloxseg5ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_mu( @@ -1581,7 +1581,7 @@ void test_vloxseg5ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: 
ret void // void test_vloxseg5ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_mu( @@ -1600,7 +1600,7 @@ void test_vloxseg5ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_mu( @@ -1619,7 +1619,7 @@ void test_vloxseg5ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_mu( @@ -1638,7 +1638,7 @@ void test_vloxseg5ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_mu( @@ -1657,7 +1657,7 @@ void test_vloxseg5ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_mu( @@ -1676,7 +1676,7 @@ void test_vloxseg5ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, 
vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_mu( @@ -1695,7 +1695,7 @@ void test_vloxseg5ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_mu( @@ -1714,7 +1714,7 @@ void test_vloxseg5ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_mu( @@ -1733,7 +1733,7 @@ void test_vloxseg5ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_mu( @@ -1752,7 +1752,7 @@ void test_vloxseg5ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_mu( @@ -1771,7 +1771,7 @@ void test_vloxseg5ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t 
maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_mu( @@ -1790,7 +1790,7 @@ void test_vloxseg5ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_mu( @@ -1809,7 +1809,7 @@ void test_vloxseg5ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg5ei64_v_u8mf4_mu( @@ -1828,7 +1828,7 @@ void test_vloxseg5ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_mu( @@ -1847,7 +1847,7 @@ void test_vloxseg5ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_mu( @@ -1866,7 +1866,7 @@ void test_vloxseg5ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, 
vuint64m8_t bindex, size_t vl) { - return vloxseg5ei64_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_mu( @@ -1885,7 +1885,7 @@ void test_vloxseg5ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_mu( @@ -1904,7 +1904,7 @@ void test_vloxseg5ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_mu( @@ -1923,7 +1923,7 @@ void 
test_vloxseg5ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_mu( @@ -1942,7 +1942,7 @@ void test_vloxseg5ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_mu( @@ -1961,7 +1961,7 @@ void test_vloxseg5ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - 
return vloxseg5ei64_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_mu( @@ -1980,6 +1980,6 @@ void test_vloxseg5ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei64_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei8.c index 76931a548a71..d64363986a3b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg5ei8.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vloxseg5ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_tu( @@ -42,7 +42,7 @@ void test_vloxseg5ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_tu( @@ -61,7 +61,7 @@ void test_vloxseg5ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_tu( @@ -80,7 +80,7 @@ void test_vloxseg5ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, 
vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_tu( @@ -99,7 +99,7 @@ void test_vloxseg5ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_tu( @@ -118,7 +118,7 @@ void test_vloxseg5ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_tu( @@ 
-137,7 +137,7 @@ void test_vloxseg5ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_tu( @@ -156,7 +156,7 @@ void test_vloxseg5ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_tu( @@ -175,7 +175,7 @@ void test_vloxseg5ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_tu( @@ -194,7 +194,7 @@ void test_vloxseg5ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_tu( @@ -213,7 +213,7 @@ void test_vloxseg5ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_tu( @@ -232,7 +232,7 @@ void test_vloxseg5ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t 
maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_tu( @@ -251,7 +251,7 @@ void test_vloxseg5ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_tu( @@ -270,7 +270,7 @@ void test_vloxseg5ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_tu( @@ -289,7 +289,7 @@ void test_vloxseg5ei8_v_i32mf2_tu(vint32mf2_t *v0, 
vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_tu( @@ -308,7 +308,7 @@ void test_vloxseg5ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_tu( @@ -327,7 +327,7 @@ void test_vloxseg5ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vloxseg5ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_tu( @@ -346,7 +346,7 @@ void test_vloxseg5ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_tu( @@ -365,7 +365,7 @@ void test_vloxseg5ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_tu( @@ -384,7 +384,7 @@ void test_vloxseg5ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, 
vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_tu( @@ -403,7 +403,7 @@ void test_vloxseg5ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_tu( @@ -422,7 +422,7 @@ void test_vloxseg5ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_tu( @@ -441,7 +441,7 @@ void 
test_vloxseg5ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_tu( @@ -460,7 +460,7 @@ void test_vloxseg5ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_tu( @@ -479,7 +479,7 @@ void test_vloxseg5ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_tu( @@ -498,7 +498,7 @@ void test_vloxseg5ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_tum( @@ -517,7 +517,7 @@ void test_vloxseg5ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_tum( @@ -536,7 +536,7 @@ void test_vloxseg5ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16mf2_tum(vfloat16mf2_t *v0, 
vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_tum( @@ -555,7 +555,7 @@ void test_vloxseg5ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_tum( @@ -574,7 +574,7 @@ void test_vloxseg5ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, 
bindex, vl); + return __riscv_vloxseg5ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_tum( @@ -593,7 +593,7 @@ void test_vloxseg5ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_tum( @@ -612,7 +612,7 @@ void test_vloxseg5ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_tum( @@ -631,7 +631,7 @@ void test_vloxseg5ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, 
vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_tum( @@ -650,7 +650,7 @@ void test_vloxseg5ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_tum( @@ -669,7 +669,7 @@ void test_vloxseg5ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_tum( @@ -688,7 +688,7 @@ void test_vloxseg5ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_tum( @@ -707,7 +707,7 @@ void test_vloxseg5ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_tum( @@ -726,7 +726,7 @@ void test_vloxseg5ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t 
maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_tum( @@ -745,7 +745,7 @@ void test_vloxseg5ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_tum( @@ -764,7 +764,7 @@ void test_vloxseg5ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_tum( @@ -783,7 
+783,7 @@ void test_vloxseg5ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_tum( @@ -802,7 +802,7 @@ void test_vloxseg5ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_tum( @@ -821,7 +821,7 @@ void test_vloxseg5ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return 
vloxseg5ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_tum( @@ -840,7 +840,7 @@ void test_vloxseg5ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_tum( @@ -859,7 +859,7 @@ void test_vloxseg5ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_tum( @@ -878,7 +878,7 @@ void test_vloxseg5ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // 
CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_tum( @@ -897,7 +897,7 @@ void test_vloxseg5ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_tum( @@ -916,7 +916,7 @@ void test_vloxseg5ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_tum( @@ -935,7 +935,7 @@ void test_vloxseg5ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_tum( @@ -954,7 +954,7 @@ void test_vloxseg5ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_tum( @@ -973,7 +973,7 @@ void test_vloxseg5ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void 
test_vloxseg5ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_tum( @@ -992,7 +992,7 @@ void test_vloxseg5ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_tumu( @@ -1011,7 +1011,7 @@ void test_vloxseg5ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_tumu( @@ -1030,7 +1030,7 @@ void test_vloxseg5ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_tumu( @@ -1049,7 +1049,7 @@ void test_vloxseg5ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_tumu( @@ -1068,7 +1068,7 @@ void test_vloxseg5ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void 
test_vloxseg5ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_tumu( @@ -1087,7 +1087,7 @@ void test_vloxseg5ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_tumu( @@ -1106,7 +1106,7 @@ void test_vloxseg5ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_tumu( @@ -1125,7 +1125,7 @@ void test_vloxseg5ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_tumu( @@ -1144,7 +1144,7 @@ void test_vloxseg5ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_tumu( @@ -1163,7 +1163,7 @@ void test_vloxseg5ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf2_tumu(vint8mf2_t *v0, 
vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_tumu( @@ -1182,7 +1182,7 @@ void test_vloxseg5ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_tumu( @@ -1201,7 +1201,7 @@ void test_vloxseg5ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_tumu( @@ -1220,7 +1220,7 @@ void test_vloxseg5ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_tumu( @@ -1239,7 +1239,7 @@ void test_vloxseg5ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_tumu( @@ -1258,7 +1258,7 @@ void test_vloxseg5ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t 
maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_tumu( @@ -1277,7 +1277,7 @@ void test_vloxseg5ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_tumu( @@ -1296,7 +1296,7 @@ void test_vloxseg5ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_tumu( @@ -1315,7 +1315,7 @@ void test_vloxseg5ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_tumu( @@ -1334,7 +1334,7 @@ void test_vloxseg5ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_tumu( @@ -1353,7 +1353,7 @@ void test_vloxseg5ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, 
vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_tumu( @@ -1372,7 +1372,7 @@ void test_vloxseg5ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_tumu( @@ -1391,7 +1391,7 @@ void test_vloxseg5ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_tumu( @@ 
-1410,7 +1410,7 @@ void test_vloxseg5ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_tumu( @@ -1429,7 +1429,7 @@ void test_vloxseg5ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_tumu( @@ -1448,7 +1448,7 @@ void test_vloxseg5ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const 
uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_tumu( @@ -1467,7 +1467,7 @@ void test_vloxseg5ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_tumu( @@ -1486,7 +1486,7 @@ void test_vloxseg5ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_mu( @@ -1505,7 +1505,7 @@ void 
test_vloxseg5ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_mu( @@ -1524,7 +1524,7 @@ void test_vloxseg5ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_mu( @@ -1543,7 +1543,7 @@ void test_vloxseg5ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, 
vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_mu( @@ -1562,7 +1562,7 @@ void test_vloxseg5ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_mu( @@ -1581,7 +1581,7 @@ void test_vloxseg5ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_mu( @@ -1600,7 +1600,7 @@ void 
test_vloxseg5ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_mu( @@ -1619,7 +1619,7 @@ void test_vloxseg5ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_mu( @@ -1638,7 +1638,7 @@ void test_vloxseg5ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf4_mu(v0, v1, 
v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_mu( @@ -1657,7 +1657,7 @@ void test_vloxseg5ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_mu( @@ -1676,7 +1676,7 @@ void test_vloxseg5ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_mu( @@ -1695,7 +1695,7 @@ void test_vloxseg5ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16mf4_mu(vint16mf4_t *v0, 
vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_mu( @@ -1714,7 +1714,7 @@ void test_vloxseg5ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_mu( @@ -1733,7 +1733,7 @@ void test_vloxseg5ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i16m1_mu(v0, v1, v2, 
v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_mu( @@ -1752,7 +1752,7 @@ void test_vloxseg5ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_mu( @@ -1771,7 +1771,7 @@ void test_vloxseg5ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_mu( @@ -1790,7 +1790,7 @@ void test_vloxseg5ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t 
maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_mu( @@ -1809,7 +1809,7 @@ void test_vloxseg5ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_mu( @@ -1828,7 +1828,7 @@ void test_vloxseg5ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg5ei8_v_u8mf2_mu( @@ -1847,7 +1847,7 @@ void test_vloxseg5ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_mu( @@ -1866,7 +1866,7 @@ void test_vloxseg5ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_mu( @@ -1885,7 +1885,7 @@ void test_vloxseg5ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, 
vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_mu( @@ -1904,7 +1904,7 @@ void test_vloxseg5ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_mu( @@ -1923,7 +1923,7 @@ void test_vloxseg5ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_mu( @@ -1942,7 +1942,7 @@ void 
test_vloxseg5ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_mu( @@ -1961,7 +1961,7 @@ void test_vloxseg5ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_mu( @@ -1980,6 +1980,6 @@ void test_vloxseg5ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg5ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return 
vloxseg5ei8_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vloxseg5ei8_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei16.c index fb637425fc29..63bdd28a19f4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei16.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_tu( @@ -46,7 +46,7 @@ void test_vloxseg6ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return 
vloxseg6ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_tu( @@ -67,7 +67,7 @@ void test_vloxseg6ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_tu( @@ -88,7 +88,7 @@ void test_vloxseg6ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_tu( @@ -109,7 +109,7 @@ void test_vloxseg6ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_tu( @@ -130,7 +130,7 @@ void test_vloxseg6ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_tu( @@ -151,7 +151,7 @@ void test_vloxseg6ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t 
maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_tu( @@ -172,7 +172,7 @@ void test_vloxseg6ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_tu( @@ -193,7 +193,7 @@ void test_vloxseg6ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf2_tu(v0, 
v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_tu( @@ -214,7 +214,7 @@ void test_vloxseg6ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_tu( @@ -235,7 +235,7 @@ void test_vloxseg6ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_tu( @@ -256,7 +256,7 @@ void test_vloxseg6ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, 
vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_tu( @@ -277,7 +277,7 @@ void test_vloxseg6ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_tu( @@ -298,7 +298,7 @@ void test_vloxseg6ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_tu( @@ -319,7 +319,7 @@ void test_vloxseg6ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_tu( @@ -340,7 +340,7 @@ void test_vloxseg6ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_tu( @@ -361,7 +361,7 @@ void test_vloxseg6ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, 
vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_tu( @@ -382,7 +382,7 @@ void test_vloxseg6ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_tu( @@ -403,7 +403,7 @@ void test_vloxseg6ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, 
vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_tu( @@ -424,7 +424,7 @@ void test_vloxseg6ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_tu( @@ -445,7 +445,7 @@ void test_vloxseg6ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_tu( @@ -466,7 +466,7 @@ void test_vloxseg6ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_tu( @@ -487,7 +487,7 @@ void test_vloxseg6ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_tu( @@ -508,7 +508,7 @@ void test_vloxseg6ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, 
vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_tu( @@ -529,7 +529,7 @@ void test_vloxseg6ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_tu( @@ -550,7 +550,7 @@ void test_vloxseg6ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
bindex, vl); + return __riscv_vloxseg6ei16_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_tum( @@ -571,7 +571,7 @@ void test_vloxseg6ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_tum( @@ -592,7 +592,7 @@ void test_vloxseg6ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_tum( @@ 
-613,7 +613,7 @@ void test_vloxseg6ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vloxseg6ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_tum( @@ -655,7 +655,7 @@ void test_vloxseg6ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, 
vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_tum( @@ -676,7 +676,7 @@ void test_vloxseg6ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_tum( @@ -697,7 +697,7 @@ void test_vloxseg6ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg6ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_tum( @@ -718,7 +718,7 @@ void test_vloxseg6ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_tum( @@ -739,7 +739,7 @@ void test_vloxseg6ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_tum( @@ -760,7 +760,7 @@ void test_vloxseg6ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_tum( @@ -781,7 +781,7 @@ void test_vloxseg6ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_tum( @@ -802,7 +802,7 @@ void test_vloxseg6ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, 
vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_tum( @@ -823,7 +823,7 @@ void test_vloxseg6ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vloxseg6ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_tum( @@ -865,7 +865,7 @@ void test_vloxseg6ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_tum( @@ -886,7 +886,7 @@ void test_vloxseg6ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg6ei16_v_u8mf8_tum( @@ -907,7 +907,7 @@ void test_vloxseg6ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_tum( @@ -928,7 +928,7 @@ void test_vloxseg6ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vloxseg6ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t 
*v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_tum( @@ -970,7 +970,7 @@ void test_vloxseg6ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_tum( @@ -991,7 +991,7 @@ void test_vloxseg6ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf4_tum(v0, v1, v2, v3, 
v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_tum( @@ -1012,7 +1012,7 @@ void test_vloxseg6ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_tum( @@ -1033,7 +1033,7 @@ void test_vloxseg6ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vloxseg6ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_tum( @@ -1075,7 +1075,7 @@ void test_vloxseg6ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_tum( @@ -1096,7 +1096,7 @@ void test_vloxseg6ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u64m1_tum(vuint64m1_t *v0, 
vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_tumu( @@ -1117,7 +1117,7 @@ void test_vloxseg6ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_tumu( @@ -1138,7 +1138,7 @@ void test_vloxseg6ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t 
maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void test_vloxseg6ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_tumu( @@ -1180,7 +1180,7 @@ void test_vloxseg6ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
base, bindex, vl); + return __riscv_vloxseg6ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_tumu( @@ -1201,7 +1201,7 @@ void test_vloxseg6ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_tumu( @@ -1222,7 +1222,7 @@ void test_vloxseg6ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_tumu( @@ -1243,7 
+1243,7 @@ void test_vloxseg6ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_tumu( @@ -1264,7 +1264,7 @@ void test_vloxseg6ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_tumu( @@ -1285,7 +1285,7 @@ void test_vloxseg6ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, 
vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_tumu( @@ -1306,7 +1306,7 @@ void test_vloxseg6ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_tumu( @@ -1327,7 +1327,7 @@ void test_vloxseg6ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_tumu( @@ -1348,7 +1348,7 @@ void test_vloxseg6ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vloxseg6ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_tumu( @@ -1390,7 +1390,7 @@ void 
test_vloxseg6ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_tumu( @@ -1411,7 +1411,7 @@ void test_vloxseg6ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_tumu( @@ -1432,7 +1432,7 @@ void test_vloxseg6ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, 
vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_tumu( @@ -1453,7 +1453,7 @@ void test_vloxseg6ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_tumu( @@ -1474,7 +1474,7 @@ void test_vloxseg6ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_tumu( @@ -1495,7 +1495,7 @@ void test_vloxseg6ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_tumu( @@ -1516,7 +1516,7 @@ void test_vloxseg6ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_tumu( @@ -1537,7 
+1537,7 @@ void test_vloxseg6ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_tumu( @@ -1558,7 +1558,7 @@ void test_vloxseg6ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vloxseg6ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, 
vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_tumu( @@ -1600,7 +1600,7 @@ void test_vloxseg6ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_tumu( @@ -1621,7 +1621,7 @@ void test_vloxseg6ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - 
return vloxseg6ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_tumu( @@ -1642,7 +1642,7 @@ void test_vloxseg6ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_mu( @@ -1663,7 +1663,7 @@ void test_vloxseg6ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_mu( @@ -1684,7 +1684,7 @@ void test_vloxseg6ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_mu( @@ -1705,7 +1705,7 @@ void test_vloxseg6ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_mu( @@ -1726,7 +1726,7 @@ void test_vloxseg6ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_mu( @@ -1747,7 +1747,7 @@ void test_vloxseg6ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_mu( @@ -1768,7 +1768,7 @@ void test_vloxseg6ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t 
maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_mu( @@ -1789,7 +1789,7 @@ void test_vloxseg6ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_mu( @@ -1810,7 +1810,7 @@ void test_vloxseg6ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return 
__riscv_vloxseg6ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_mu( @@ -1831,7 +1831,7 @@ void test_vloxseg6ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_mu( @@ -1852,7 +1852,7 @@ void test_vloxseg6ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_mu( @@ -1873,7 +1873,7 @@ void test_vloxseg6ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: 
ret void // void test_vloxseg6ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_mu( @@ -1894,7 +1894,7 @@ void test_vloxseg6ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_mu( @@ -1915,7 +1915,7 @@ void test_vloxseg6ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, 
vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_mu( @@ -1936,7 +1936,7 @@ void test_vloxseg6ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_mu( @@ -1957,7 +1957,7 @@ void test_vloxseg6ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i32m1_mu(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_mu( @@ -1978,7 +1978,7 @@ void test_vloxseg6ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_mu( @@ -1999,7 +1999,7 @@ void test_vloxseg6ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_mu( @@ -2020,7 +2020,7 @@ void test_vloxseg6ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg6ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_mu( @@ -2041,7 +2041,7 @@ void test_vloxseg6ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_mu( @@ -2062,7 +2062,7 @@ void test_vloxseg6ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t 
*base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_mu( @@ -2083,7 +2083,7 @@ void test_vloxseg6ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_mu( @@ -2104,7 +2104,7 @@ void test_vloxseg6ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, 
v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_mu( @@ -2125,7 +2125,7 @@ void test_vloxseg6ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_mu( @@ -2146,7 +2146,7 @@ void test_vloxseg6ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_mu( @@ -2167,7 +2167,7 @@ void test_vloxseg6ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // 
CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_mu( @@ -2188,6 +2188,6 @@ void test_vloxseg6ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei16_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei32.c index 270d511ca025..96190c48b5f5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei32.c @@ -25,7 
+25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_tu( @@ -46,7 +46,7 @@ void test_vloxseg6ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_tu( @@ -67,7 +67,7 @@ void test_vloxseg6ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t 
maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_tu( @@ -88,7 +88,7 @@ void test_vloxseg6ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_tu( @@ -109,7 +109,7 @@ void test_vloxseg6ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_tu( @@ -130,7 +130,7 @@ void test_vloxseg6ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_tu( @@ -151,7 +151,7 @@ void test_vloxseg6ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_tu( @@ -172,7 +172,7 @@ void test_vloxseg6ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, 
vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_tu( @@ -193,7 +193,7 @@ void test_vloxseg6ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_tu( @@ -214,7 +214,7 @@ void test_vloxseg6ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
bindex, vl); + return __riscv_vloxseg6ei32_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_tu( @@ -235,7 +235,7 @@ void test_vloxseg6ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_tu( @@ -256,7 +256,7 @@ void test_vloxseg6ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_tu( @@ -277,7 +277,7 @@ void test_vloxseg6ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // 
CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_tu( @@ -298,7 +298,7 @@ void test_vloxseg6ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_tu( @@ -319,7 +319,7 @@ void test_vloxseg6ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - 
return vloxseg6ei32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_tu( @@ -340,7 +340,7 @@ void test_vloxseg6ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_tu( @@ -361,7 +361,7 @@ void test_vloxseg6ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_tu( @@ 
-382,7 +382,7 @@ void test_vloxseg6ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_tu( @@ -403,7 +403,7 @@ void test_vloxseg6ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_tu( @@ -424,7 +424,7 @@ void test_vloxseg6ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, 
vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_tu( @@ -445,7 +445,7 @@ void test_vloxseg6ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_tu( @@ -466,7 +466,7 @@ void test_vloxseg6ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_tu( @@ -487,7 +487,7 @@ void test_vloxseg6ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_tu( @@ -508,7 +508,7 @@ void test_vloxseg6ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_tu( @@ -529,7 +529,7 @@ void test_vloxseg6ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u32m1_tu(vuint32m1_t *v0, 
vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_tu( @@ -550,7 +550,7 @@ void test_vloxseg6ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_tum( @@ -571,7 +571,7 @@ void test_vloxseg6ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return 
vloxseg6ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_tum( @@ -592,7 +592,7 @@ void test_vloxseg6ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_tum( @@ -613,7 +613,7 @@ void test_vloxseg6ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vloxseg6ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_tum( @@ -655,7 +655,7 @@ void test_vloxseg6ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_tum( @@ -676,7 +676,7 @@ void test_vloxseg6ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret 
void // void test_vloxseg6ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_tum( @@ -697,7 +697,7 @@ void test_vloxseg6ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_tum( @@ -718,7 +718,7 @@ void test_vloxseg6ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t 
maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_tum( @@ -739,7 +739,7 @@ void test_vloxseg6ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_tum( @@ -760,7 +760,7 @@ void test_vloxseg6ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_tum( @@ -781,7 +781,7 @@ void test_vloxseg6ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_tum( @@ -802,7 +802,7 @@ void test_vloxseg6ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_tum( @@ -823,7 +823,7 @@ void test_vloxseg6ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void 
test_vloxseg6ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vloxseg6ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_tum( @@ -865,7 +865,7 @@ void test_vloxseg6ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const 
int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_tum( @@ -886,7 +886,7 @@ void test_vloxseg6ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_tum( @@ -907,7 +907,7 @@ void test_vloxseg6ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_tum( @@ -928,7 +928,7 @@ void test_vloxseg6ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vloxseg6ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_tum( @@ -970,7 +970,7 @@ void test_vloxseg6ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg6ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_tum( @@ -991,7 +991,7 @@ void test_vloxseg6ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_tum( @@ -1012,7 +1012,7 @@ void test_vloxseg6ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, 
vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_tum( @@ -1033,7 +1033,7 @@ void test_vloxseg6ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vloxseg6ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return 
__riscv_vloxseg6ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_tum( @@ -1075,7 +1075,7 @@ void test_vloxseg6ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_tum( @@ -1096,7 +1096,7 @@ void test_vloxseg6ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_tumu( @@ -1117,7 +1117,7 @@ void 
test_vloxseg6ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_tumu( @@ -1138,7 +1138,7 @@ void test_vloxseg6ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void test_vloxseg6ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, 
vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_tumu( @@ -1180,7 +1180,7 @@ void test_vloxseg6ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_tumu( @@ -1201,7 +1201,7 @@ void test_vloxseg6ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t 
bindex, size_t vl) { - return vloxseg6ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_tumu( @@ -1222,7 +1222,7 @@ void test_vloxseg6ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_tumu( @@ -1243,7 +1243,7 @@ void test_vloxseg6ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_tumu( @@ -1264,7 +1264,7 @@ void test_vloxseg6ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_tumu( @@ -1285,7 +1285,7 @@ void test_vloxseg6ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_tumu( @@ -1306,7 +1306,7 @@ void test_vloxseg6ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void 
test_vloxseg6ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_tumu( @@ -1327,7 +1327,7 @@ void test_vloxseg6ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_tumu( @@ -1348,7 +1348,7 @@ void test_vloxseg6ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, 
const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vloxseg6ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_tumu( @@ -1390,7 +1390,7 @@ void test_vloxseg6ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_tumu( @@ -1411,7 +1411,7 @@ void test_vloxseg6ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_tumu( @@ -1432,7 +1432,7 @@ void test_vloxseg6ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_tumu( @@ -1453,7 +1453,7 @@ void test_vloxseg6ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void 
test_vloxseg6ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_tumu( @@ -1474,7 +1474,7 @@ void test_vloxseg6ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_tumu( @@ -1495,7 +1495,7 @@ void test_vloxseg6ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, 
vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_tumu( @@ -1516,7 +1516,7 @@ void test_vloxseg6ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_tumu( @@ -1537,7 +1537,7 @@ void test_vloxseg6ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return 
__riscv_vloxseg6ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_tumu( @@ -1558,7 +1558,7 @@ void test_vloxseg6ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vloxseg6ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_tumu( @@ -1600,7 +1600,7 @@ void 
test_vloxseg6ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_tumu( @@ -1621,7 +1621,7 @@ void test_vloxseg6ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_tumu( @@ -1642,7 +1642,7 @@ void test_vloxseg6ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t 
mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_mu( @@ -1663,7 +1663,7 @@ void test_vloxseg6ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_mu( @@ -1684,7 +1684,7 @@ void test_vloxseg6ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return 
vloxseg6ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_mu( @@ -1705,7 +1705,7 @@ void test_vloxseg6ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_mu( @@ -1726,7 +1726,7 @@ void test_vloxseg6ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_mu( @@ -1747,7 +1747,7 @@ void test_vloxseg6ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_mu( @@ -1768,7 +1768,7 @@ void test_vloxseg6ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_mu( @@ -1789,7 +1789,7 @@ void test_vloxseg6ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg6ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_mu( @@ -1810,7 +1810,7 @@ void test_vloxseg6ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_mu( @@ -1831,7 +1831,7 @@ void test_vloxseg6ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t 
bindex, size_t vl) { - return vloxseg6ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_mu( @@ -1852,7 +1852,7 @@ void test_vloxseg6ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_mu( @@ -1873,7 +1873,7 @@ void test_vloxseg6ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_mu( @@ -1894,7 +1894,7 @@ void test_vloxseg6ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_mu( @@ -1915,7 +1915,7 @@ void test_vloxseg6ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_mu( @@ -1936,7 +1936,7 @@ void test_vloxseg6ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, 
vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_mu( @@ -1957,7 +1957,7 @@ void test_vloxseg6ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_mu( @@ -1978,7 +1978,7 @@ void test_vloxseg6ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return 
vloxseg6ei32_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_mu( @@ -1999,7 +1999,7 @@ void test_vloxseg6ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_mu( @@ -2020,7 +2020,7 @@ void test_vloxseg6ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_mu( @@ -2041,7 +2041,7 @@ void test_vloxseg6ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_mu( @@ -2062,7 +2062,7 @@ void test_vloxseg6ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_mu( @@ -2083,7 +2083,7 @@ void test_vloxseg6ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, 
vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_mu( @@ -2104,7 +2104,7 @@ void test_vloxseg6ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_mu( @@ -2125,7 +2125,7 @@ void test_vloxseg6ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t 
vl) { - return vloxseg6ei32_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_mu( @@ -2146,7 +2146,7 @@ void test_vloxseg6ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_mu( @@ -2167,7 +2167,7 @@ void test_vloxseg6ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_mu( @@ -2188,6 +2188,6 @@ void test_vloxseg6ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei32_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei64.c index 5d87b9b276b2..07bbd76c7f4c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei64.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_tu( @@ -46,7 +46,7 @@ void test_vloxseg6ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_tu( @@ -67,7 +67,7 @@ void test_vloxseg6ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_tu( @@ -88,7 +88,7 @@ void test_vloxseg6ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f32mf2_tu(vfloat32mf2_t *v0, 
vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_tu( @@ -109,7 +109,7 @@ void test_vloxseg6ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_tu( @@ -130,7 +130,7 @@ void test_vloxseg6ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return 
vloxseg6ei64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_tu( @@ -151,7 +151,7 @@ void test_vloxseg6ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_tu( @@ -172,7 +172,7 @@ void test_vloxseg6ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_tu( @@ -193,7 +193,7 @@ void 
test_vloxseg6ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_tu( @@ -214,7 +214,7 @@ void test_vloxseg6ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_tu( @@ -235,7 +235,7 @@ void test_vloxseg6ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, 
const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_tu( @@ -256,7 +256,7 @@ void test_vloxseg6ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_tu( @@ -277,7 +277,7 @@ void test_vloxseg6ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_tu( @@ -298,7 +298,7 @@ void test_vloxseg6ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_tu( @@ -319,7 +319,7 @@ void test_vloxseg6ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_tu( @@ -340,7 +340,7 @@ void test_vloxseg6ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t 
maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_tu( @@ -361,7 +361,7 @@ void test_vloxseg6ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_tu( @@ -382,7 +382,7 @@ void test_vloxseg6ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf4_tu(v0, 
v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_tu( @@ -403,7 +403,7 @@ void test_vloxseg6ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_tu( @@ -424,7 +424,7 @@ void test_vloxseg6ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_tu( @@ -445,7 +445,7 @@ void test_vloxseg6ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t 
*v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_tu( @@ -466,7 +466,7 @@ void test_vloxseg6ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_tu( @@ -487,7 +487,7 @@ void test_vloxseg6ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u16m1_tu(v0, v1, v2, 
v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_tu( @@ -508,7 +508,7 @@ void test_vloxseg6ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_tu( @@ -529,7 +529,7 @@ void test_vloxseg6ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_tu( @@ -550,7 +550,7 @@ 
void test_vloxseg6ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_tum( @@ -571,7 +571,7 @@ void test_vloxseg6ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_tum( @@ -592,7 +592,7 @@ void test_vloxseg6ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, 
vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_tum( @@ -613,7 +613,7 @@ void test_vloxseg6ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vloxseg6ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return 
vloxseg6ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_tum( @@ -655,7 +655,7 @@ void test_vloxseg6ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_tum( @@ -676,7 +676,7 @@ void test_vloxseg6ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_tum( @@ -697,7 +697,7 @@ void test_vloxseg6ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_tum( @@ -718,7 +718,7 @@ void test_vloxseg6ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_tum( @@ -739,7 +739,7 @@ void test_vloxseg6ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_tum( @@ -760,7 +760,7 @@ void test_vloxseg6ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_tum( @@ -781,7 +781,7 @@ void test_vloxseg6ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf4_tum(v0, v1, 
v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_tum( @@ -802,7 +802,7 @@ void test_vloxseg6ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_tum( @@ -823,7 +823,7 @@ void test_vloxseg6ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vloxseg6ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_tum( @@ -865,7 +865,7 @@ void test_vloxseg6ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_tum( @@ -886,7 +886,7 @@ void test_vloxseg6ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t 
*v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_tum( @@ -907,7 +907,7 @@ void test_vloxseg6ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_tum( @@ -928,7 +928,7 @@ void test_vloxseg6ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, 
v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vloxseg6ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_tum( @@ -970,7 +970,7 @@ void test_vloxseg6ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg6ei64_v_u16mf4_tum( @@ -991,7 +991,7 @@ void test_vloxseg6ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_tum( @@ -1012,7 +1012,7 @@ void test_vloxseg6ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_tum( @@ -1033,7 +1033,7 @@ void test_vloxseg6ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t 
*v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vloxseg6ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_tum( @@ -1075,7 +1075,7 @@ void test_vloxseg6ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, 
size_t vl) { - return vloxseg6ei64_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_tum( @@ -1096,7 +1096,7 @@ void test_vloxseg6ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_tumu( @@ -1117,7 +1117,7 @@ void test_vloxseg6ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_tumu( @@ -1138,7 +1138,7 @@ void test_vloxseg6ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void test_vloxseg6ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_tumu( @@ -1180,7 +1180,7 @@ void test_vloxseg6ei64_v_f16m1_tumu(vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_tumu( @@ -1201,7 +1201,7 @@ void test_vloxseg6ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_tumu( @@ -1222,7 +1222,7 @@ void test_vloxseg6ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t 
maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_tumu( @@ -1243,7 +1243,7 @@ void test_vloxseg6ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_tumu( @@ -1264,7 +1264,7 @@ void test_vloxseg6ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_tumu( @@ -1285,7 +1285,7 @@ void test_vloxseg6ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_tumu( @@ -1306,7 +1306,7 @@ void test_vloxseg6ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_tumu( @@ -1327,7 +1327,7 @@ void 
test_vloxseg6ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_tumu( @@ -1348,7 +1348,7 @@ void test_vloxseg6ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vloxseg6ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t 
maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_tumu( @@ -1390,7 +1390,7 @@ void test_vloxseg6ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_tumu( @@ -1411,7 +1411,7 @@ void test_vloxseg6ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_tumu( @@ -1432,7 +1432,7 @@ void test_vloxseg6ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_tumu( @@ -1453,7 +1453,7 @@ void test_vloxseg6ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_tumu( @@ -1474,7 
+1474,7 @@ void test_vloxseg6ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_tumu( @@ -1495,7 +1495,7 @@ void test_vloxseg6ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_tumu( @@ -1516,7 +1516,7 @@ void test_vloxseg6ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, 
vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_tumu( @@ -1537,7 +1537,7 @@ void test_vloxseg6ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_tumu( @@ -1558,7 +1558,7 @@ void test_vloxseg6ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf2_tumu(v0, v1, v2, v3, 
v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vloxseg6ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_tumu( @@ -1600,7 +1600,7 @@ void test_vloxseg6ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_tumu( @@ -1621,7 +1621,7 @@ void test_vloxseg6ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_tumu( @@ -1642,7 +1642,7 @@ void test_vloxseg6ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_mu( @@ -1663,7 +1663,7 @@ void test_vloxseg6ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16mf4_mu(vfloat16mf4_t *v0, 
vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_mu( @@ -1684,7 +1684,7 @@ void test_vloxseg6ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_mu( @@ -1705,7 +1705,7 @@ void test_vloxseg6ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, 
vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_mu( @@ -1726,7 +1726,7 @@ void test_vloxseg6ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_mu( @@ -1747,7 +1747,7 @@ void test_vloxseg6ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return 
__riscv_vloxseg6ei64_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_mu( @@ -1768,7 +1768,7 @@ void test_vloxseg6ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_mu( @@ -1789,7 +1789,7 @@ void test_vloxseg6ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_mu( @@ -1810,7 +1810,7 @@ void test_vloxseg6ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t 
*v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_mu( @@ -1831,7 +1831,7 @@ void test_vloxseg6ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_mu( @@ -1852,7 +1852,7 @@ void test_vloxseg6ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t 
maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_mu( @@ -1873,7 +1873,7 @@ void test_vloxseg6ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_mu( @@ -1894,7 +1894,7 @@ void test_vloxseg6ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_mu( @@ -1915,7 +1915,7 @@ void test_vloxseg6ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_mu( @@ -1936,7 +1936,7 @@ void test_vloxseg6ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_mu( @@ -1957,7 +1957,7 @@ void test_vloxseg6ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg6ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_mu( @@ -1978,7 +1978,7 @@ void test_vloxseg6ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_mu( @@ -1999,7 +1999,7 @@ void test_vloxseg6ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, 
vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_mu( @@ -2020,7 +2020,7 @@ void test_vloxseg6ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_mu( @@ -2041,7 +2041,7 @@ void test_vloxseg6ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_mu( @@ -2062,7 +2062,7 @@ void test_vloxseg6ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg6ei64_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_mu( @@ -2083,7 +2083,7 @@ void test_vloxseg6ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_mu( @@ -2104,7 +2104,7 @@ void test_vloxseg6ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void 
test_vloxseg6ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_mu( @@ -2125,7 +2125,7 @@ void test_vloxseg6ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_mu( @@ -2146,7 +2146,7 @@ void test_vloxseg6ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t 
maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_mu( @@ -2167,7 +2167,7 @@ void test_vloxseg6ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei64_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_mu( @@ -2188,6 +2188,6 @@ void test_vloxseg6ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return 
__riscv_vloxseg6ei64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei8.c index ad7956e63fba..cf0cc36dbcc7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg6ei8.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_tu( @@ -46,7 +46,7 @@ void test_vloxseg6ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return 
__riscv_vloxseg6ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_tu( @@ -67,7 +67,7 @@ void test_vloxseg6ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_tu( @@ -88,7 +88,7 @@ void test_vloxseg6ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_tu( @@ -109,7 +109,7 @@ void test_vloxseg6ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: 
ret void // void test_vloxseg6ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_tu( @@ -130,7 +130,7 @@ void test_vloxseg6ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_tu( @@ -151,7 +151,7 @@ void test_vloxseg6ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - 
return vloxseg6ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_tu( @@ -172,7 +172,7 @@ void test_vloxseg6ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_tu( @@ -193,7 +193,7 @@ void test_vloxseg6ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_tu( @@ -214,7 +214,7 @@ void 
test_vloxseg6ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_tu( @@ -235,7 +235,7 @@ void test_vloxseg6ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_tu( @@ -256,7 +256,7 @@ void test_vloxseg6ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t 
maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_tu( @@ -277,7 +277,7 @@ void test_vloxseg6ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_tu( @@ -298,7 +298,7 @@ void test_vloxseg6ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_tu( @@ -319,7 +319,7 @@ void test_vloxseg6ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_tu( @@ -340,7 +340,7 @@ void test_vloxseg6ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_tu( @@ -361,7 +361,7 @@ void test_vloxseg6ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, 
vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_tu( @@ -382,7 +382,7 @@ void test_vloxseg6ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_tu( @@ -403,7 +403,7 @@ void test_vloxseg6ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, 
v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_tu( @@ -424,7 +424,7 @@ void test_vloxseg6ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_tu( @@ -445,7 +445,7 @@ void test_vloxseg6ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_tu( @@ -466,7 +466,7 @@ void test_vloxseg6ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, 
vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_tu( @@ -487,7 +487,7 @@ void test_vloxseg6ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_tu( @@ -508,7 +508,7 @@ void test_vloxseg6ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_tu( @@ -529,7 +529,7 @@ void test_vloxseg6ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_tu( @@ -550,7 +550,7 @@ void test_vloxseg6ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_tum( @@ -571,7 +571,7 @@ void 
test_vloxseg6ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_tum( @@ -592,7 +592,7 @@ void test_vloxseg6ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_tum( @@ -613,7 +613,7 @@ void test_vloxseg6ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, 
vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vloxseg6ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_tum( @@ -655,7 +655,7 @@ void test_vloxseg6ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return 
vloxseg6ei8_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_tum( @@ -676,7 +676,7 @@ void test_vloxseg6ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_tum( @@ -697,7 +697,7 @@ void test_vloxseg6ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_tum( @@ -718,7 +718,7 @@ void test_vloxseg6ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_tum( @@ -739,7 +739,7 @@ void test_vloxseg6ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_tum( @@ -760,7 +760,7 @@ void test_vloxseg6ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t 
*v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_tum( @@ -781,7 +781,7 @@ void test_vloxseg6ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_tum( @@ -802,7 +802,7 @@ void test_vloxseg6ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_tum( @@ -823,7 +823,7 @@ void test_vloxseg6ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vloxseg6ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_tum( @@ 
-865,7 +865,7 @@ void test_vloxseg6ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_tum( @@ -886,7 +886,7 @@ void test_vloxseg6ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_tum( @@ -907,7 +907,7 @@ void test_vloxseg6ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t 
maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_tum( @@ -928,7 +928,7 @@ void test_vloxseg6ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vloxseg6ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_tum( @@ -970,7 +970,7 @@ void test_vloxseg6ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_tum( @@ -991,7 +991,7 @@ void test_vloxseg6ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_tum( @@ -1012,7 +1012,7 @@ void 
test_vloxseg6ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_tum( @@ -1033,7 +1033,7 @@ void test_vloxseg6ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vloxseg6ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, 
vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_tum( @@ -1075,7 +1075,7 @@ void test_vloxseg6ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_tum( @@ -1096,7 +1096,7 @@ void test_vloxseg6ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_tumu( @@ -1117,7 +1117,7 @@ void test_vloxseg6ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_tumu( @@ -1138,7 +1138,7 @@ void test_vloxseg6ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void test_vloxseg6ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_tumu( @@ -1180,7 +1180,7 @@ void test_vloxseg6ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_tumu( @@ -1201,7 +1201,7 @@ void test_vloxseg6ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void 
test_vloxseg6ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_tumu( @@ -1222,7 +1222,7 @@ void test_vloxseg6ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_tumu( @@ -1243,7 +1243,7 @@ void test_vloxseg6ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, 
vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_tumu( @@ -1264,7 +1264,7 @@ void test_vloxseg6ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_tumu( @@ -1285,7 +1285,7 @@ void test_vloxseg6ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_tumu( @@ -1306,7 +1306,7 @@ void test_vloxseg6ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_tumu( @@ -1327,7 +1327,7 @@ void test_vloxseg6ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_tumu( @@ -1348,7 +1348,7 @@ void test_vloxseg6ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void 
test_vloxseg6ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vloxseg6ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_tumu( @@ -1390,7 +1390,7 @@ void test_vloxseg6ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t 
maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_tumu( @@ -1411,7 +1411,7 @@ void test_vloxseg6ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_tumu( @@ -1432,7 +1432,7 @@ void test_vloxseg6ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_tumu( @@ -1453,7 +1453,7 @@ void test_vloxseg6ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_tumu( @@ -1474,7 +1474,7 @@ void test_vloxseg6ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_tumu( @@ -1495,7 +1495,7 @@ void test_vloxseg6ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // 
void test_vloxseg6ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_tumu( @@ -1516,7 +1516,7 @@ void test_vloxseg6ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_tumu( @@ -1537,7 +1537,7 @@ void test_vloxseg6ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t 
maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_tumu( @@ -1558,7 +1558,7 @@ void test_vloxseg6ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vloxseg6ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return 
__riscv_vloxseg6ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_tumu( @@ -1600,7 +1600,7 @@ void test_vloxseg6ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_tumu( @@ -1621,7 +1621,7 @@ void test_vloxseg6ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_tumu( @@ -1642,7 +1642,7 @@ void 
test_vloxseg6ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_mu( @@ -1663,7 +1663,7 @@ void test_vloxseg6ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_mu( @@ -1684,7 +1684,7 @@ void test_vloxseg6ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, 
vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_mu( @@ -1705,7 +1705,7 @@ void test_vloxseg6ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_mu( @@ -1726,7 +1726,7 @@ void test_vloxseg6ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return 
vloxseg6ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_mu( @@ -1747,7 +1747,7 @@ void test_vloxseg6ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_mu( @@ -1768,7 +1768,7 @@ void test_vloxseg6ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_mu( @@ -1789,7 +1789,7 @@ void test_vloxseg6ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_mu( @@ -1810,7 +1810,7 @@ void test_vloxseg6ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_mu( @@ -1831,7 +1831,7 @@ void test_vloxseg6ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, 
vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_mu( @@ -1852,7 +1852,7 @@ void test_vloxseg6ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_mu( @@ -1873,7 +1873,7 @@ void test_vloxseg6ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_mu( @@ -1894,7 +1894,7 @@ void test_vloxseg6ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_mu( @@ -1915,7 +1915,7 @@ void test_vloxseg6ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_mu( @@ -1936,7 +1936,7 @@ 
void test_vloxseg6ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_mu( @@ -1957,7 +1957,7 @@ void test_vloxseg6ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_mu( @@ -1978,7 +1978,7 @@ void test_vloxseg6ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, 
vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_mu( @@ -1999,7 +1999,7 @@ void test_vloxseg6ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_mu( @@ -2020,7 +2020,7 @@ void test_vloxseg6ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, 
vl); + return __riscv_vloxseg6ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_mu( @@ -2041,7 +2041,7 @@ void test_vloxseg6ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_mu( @@ -2062,7 +2062,7 @@ void test_vloxseg6ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_mu( @@ -2083,7 +2083,7 @@ void test_vloxseg6ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, 
vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_mu( @@ -2104,7 +2104,7 @@ void test_vloxseg6ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_mu( @@ -2125,7 +2125,7 @@ void test_vloxseg6ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t 
maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_mu( @@ -2146,7 +2146,7 @@ void test_vloxseg6ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_mu( @@ -2167,7 +2167,7 @@ void test_vloxseg6ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
bindex, vl); + return __riscv_vloxseg6ei8_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_mu( @@ -2188,6 +2188,6 @@ void test_vloxseg6ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg6ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vloxseg6ei8_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei16.c index ed3415c86243..9a24e5abbff1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei16.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_tu( @@ -50,7 +50,7 @@ void test_vloxseg7ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_tu( @@ -73,7 +73,7 @@ void test_vloxseg7ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, 
v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_tu( @@ -96,7 +96,7 @@ void test_vloxseg7ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_tu( @@ -119,7 +119,7 @@ void test_vloxseg7ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_tu( 
@@ -142,7 +142,7 @@ void test_vloxseg7ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_tu( @@ -165,7 +165,7 @@ void test_vloxseg7ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_tu( @@ -188,7 +188,7 @@ void test_vloxseg7ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, 
vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_tu( @@ -211,7 +211,7 @@ void test_vloxseg7ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_tu( @@ -234,7 +234,7 @@ void test_vloxseg7ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const 
int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_tu( @@ -257,7 +257,7 @@ void test_vloxseg7ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_tu( @@ -280,7 +280,7 @@ void test_vloxseg7ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_tu( @@ -303,7 +303,7 @@ void test_vloxseg7ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_tu( @@ -326,7 +326,7 @@ void test_vloxseg7ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_tu( @@ -349,7 +349,7 @@ void test_vloxseg7ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_tu( @@ -372,7 +372,7 @@ void test_vloxseg7ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_tu( @@ -395,7 +395,7 @@ void test_vloxseg7ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf8_tu(vuint8mf8_t 
*v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_tu( @@ -418,7 +418,7 @@ void test_vloxseg7ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_tu( @@ -441,7 +441,7 @@ void test_vloxseg7ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, 
vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_tu( @@ -464,7 +464,7 @@ void test_vloxseg7ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_tu( @@ -487,7 +487,7 @@ void test_vloxseg7ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_tu( @@ -510,7 +510,7 @@ void test_vloxseg7ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_tu( @@ -533,7 +533,7 @@ void test_vloxseg7ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_tu( @@ -556,7 +556,7 @@ void test_vloxseg7ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_tu( @@ -579,7 +579,7 @@ void test_vloxseg7ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_tu( @@ -602,7 +602,7 @@ void 
test_vloxseg7ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_tum( @@ -625,7 +625,7 @@ void test_vloxseg7ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_tum( @@ -648,7 +648,7 @@ void test_vloxseg7ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_tum( @@ -671,7 +671,7 @@ void test_vloxseg7ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_tum( @@ -694,7 +694,7 @@ void test_vloxseg7ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, 
vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_tum( @@ -717,7 +717,7 @@ void test_vloxseg7ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_tum( @@ -740,7 +740,7 @@ void test_vloxseg7ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t 
maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_tum( @@ -763,7 +763,7 @@ void test_vloxseg7ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_tum( @@ -786,7 +786,7 @@ void test_vloxseg7ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const 
int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_tum( @@ -809,7 +809,7 @@ void test_vloxseg7ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_tum( @@ -832,7 +832,7 @@ void test_vloxseg7ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_tum( @@ -855,7 +855,7 @@ void test_vloxseg7ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_tum( @@ -878,7 +878,7 @@ void test_vloxseg7ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_tum( @@ -901,7 +901,7 @@ void test_vloxseg7ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_tum( @@ -924,7 +924,7 @@ void test_vloxseg7ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_tum( @@ -947,7 
+947,7 @@ void test_vloxseg7ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_tum( @@ -970,7 +970,7 @@ void test_vloxseg7ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_tum( @@ -993,7 +993,7 @@ void test_vloxseg7ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_tum( @@ -1016,7 +1016,7 @@ void test_vloxseg7ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_tum( @@ -1039,7 +1039,7 @@ void test_vloxseg7ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, 
vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_tum( @@ -1062,7 +1062,7 @@ void test_vloxseg7ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_tum( @@ -1085,7 +1085,7 @@ void test_vloxseg7ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t 
maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_tum( @@ -1108,7 +1108,7 @@ void test_vloxseg7ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_tum( @@ -1131,7 +1131,7 @@ void test_vloxseg7ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t 
vl) { - return vloxseg7ei16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_tum( @@ -1154,7 +1154,7 @@ void test_vloxseg7ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_tum( @@ -1177,7 +1177,7 @@ void test_vloxseg7ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_tum( @@ -1200,7 +1200,7 @@ void test_vloxseg7ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_tumu( @@ -1223,7 +1223,7 @@ void test_vloxseg7ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vloxseg7ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_tumu( @@ -1246,7 +1246,7 @@ void test_vloxseg7ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_tumu( @@ -1269,7 +1269,7 @@ void test_vloxseg7ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_tumu( @@ -1292,7 +1292,7 @@ void test_vloxseg7ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_tumu( @@ -1315,7 +1315,7 @@ void test_vloxseg7ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_tumu( @@ -1338,7 +1338,7 @@ void test_vloxseg7ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_tumu( @@ -1361,7 +1361,7 @@ void test_vloxseg7ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_tumu( @@ -1384,7 +1384,7 @@ void 
test_vloxseg7ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_tumu( @@ -1407,7 +1407,7 @@ void test_vloxseg7ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_tumu( @@ -1430,7 +1430,7 @@ void test_vloxseg7ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8m1_tumu(vint8m1_t *v0, 
vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_tumu( @@ -1453,7 +1453,7 @@ void test_vloxseg7ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_tumu( @@ -1476,7 +1476,7 @@ void test_vloxseg7ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, 
vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_tumu( @@ -1499,7 +1499,7 @@ void test_vloxseg7ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_tumu( @@ -1522,7 +1522,7 @@ void test_vloxseg7ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t 
maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_tumu( @@ -1545,7 +1545,7 @@ void test_vloxseg7ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_tumu( @@ -1568,7 +1568,7 @@ void test_vloxseg7ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_tumu( @@ -1591,7 +1591,7 @@ void test_vloxseg7ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_tumu( @@ -1614,7 +1614,7 @@ void test_vloxseg7ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vloxseg7ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_tumu( @@ -1637,7 +1637,7 @@ void test_vloxseg7ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_tumu( @@ -1660,7 +1660,7 @@ void test_vloxseg7ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_tumu( @@ -1683,7 +1683,7 @@ void test_vloxseg7ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_tumu( @@ -1706,7 +1706,7 @@ void test_vloxseg7ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_tumu( 
@@ -1729,7 +1729,7 @@ void test_vloxseg7ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_tumu( @@ -1752,7 +1752,7 @@ void test_vloxseg7ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_tumu( @@ -1775,7 +1775,7 @@ void test_vloxseg7ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_tumu( @@ -1798,7 +1798,7 @@ void test_vloxseg7ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_mu( @@ -1821,7 +1821,7 @@ void test_vloxseg7ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, 
vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_mu( @@ -1844,7 +1844,7 @@ void test_vloxseg7ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_mu( @@ -1867,7 +1867,7 @@ void test_vloxseg7ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, 
vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_mu( @@ -1890,7 +1890,7 @@ void test_vloxseg7ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_mu( @@ -1913,7 +1913,7 @@ void test_vloxseg7ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, 
vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_mu( @@ -1936,7 +1936,7 @@ void test_vloxseg7ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_mu( @@ -1959,7 +1959,7 @@ void test_vloxseg7ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, 
size_t vl) { - return vloxseg7ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_mu( @@ -1982,7 +1982,7 @@ void test_vloxseg7ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_mu( @@ -2005,7 +2005,7 @@ void test_vloxseg7ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
bindex, vl); + return __riscv_vloxseg7ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_mu( @@ -2028,7 +2028,7 @@ void test_vloxseg7ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_mu( @@ -2051,7 +2051,7 @@ void test_vloxseg7ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_mu( @@ -2074,7 +2074,7 @@ void test_vloxseg7ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_mu( @@ -2097,7 +2097,7 @@ void test_vloxseg7ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_mu( @@ -2120,7 +2120,7 @@ void test_vloxseg7ei16_v_i16m1_mu(vint16m1_t *v0, 
vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_mu( @@ -2143,7 +2143,7 @@ void test_vloxseg7ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_mu( @@ -2166,7 +2166,7 @@ void test_vloxseg7ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, 
vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_mu( @@ -2189,7 +2189,7 @@ void test_vloxseg7ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_mu( @@ -2212,7 +2212,7 @@ void test_vloxseg7ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t 
maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_mu( @@ -2235,7 +2235,7 @@ void test_vloxseg7ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_mu( @@ -2258,7 +2258,7 @@ void test_vloxseg7ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { 
- return vloxseg7ei16_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_mu( @@ -2281,7 +2281,7 @@ void test_vloxseg7ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_mu( @@ -2304,7 +2304,7 @@ void test_vloxseg7ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_mu( @@ -2327,7 +2327,7 @@ void test_vloxseg7ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_mu( @@ -2350,7 +2350,7 @@ void test_vloxseg7ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u32mf2_mu(v0, v1, v2, 
v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_mu( @@ -2373,7 +2373,7 @@ void test_vloxseg7ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_mu( @@ -2396,6 +2396,6 @@ void test_vloxseg7ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei16_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei32.c index f3b119a53168..6f9379c2e0a2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei32.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_tu( @@ -50,7 +50,7 @@ void test_vloxseg7ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vloxseg7ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_tu( @@ -73,7 +73,7 @@ void test_vloxseg7ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_tu( @@ -96,7 +96,7 @@ void test_vloxseg7ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_tu( @@ -119,7 +119,7 @@ void test_vloxseg7ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_tu( @@ -142,7 +142,7 @@ void test_vloxseg7ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_tu( @@ -165,7 +165,7 @@ void test_vloxseg7ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_tu( @@ -188,7 +188,7 @@ void test_vloxseg7ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_tu( @@ -211,7 +211,7 @@ void test_vloxseg7ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t 
maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_tu( @@ -234,7 +234,7 @@ void test_vloxseg7ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_tu( @@ -257,7 +257,7 @@ void test_vloxseg7ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_tu( @@ -280,7 +280,7 @@ void test_vloxseg7ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_tu( @@ -303,7 +303,7 @@ void test_vloxseg7ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_tu( @@ -326,7 +326,7 @@ void test_vloxseg7ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_tu( @@ -349,7 +349,7 @@ void test_vloxseg7ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_tu( @@ -372,7 +372,7 @@ void test_vloxseg7ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_tu( @@ -395,7 +395,7 @@ void test_vloxseg7ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_tu( @@ -418,7 +418,7 @@ void test_vloxseg7ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, 
vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_tu( @@ -441,7 +441,7 @@ void test_vloxseg7ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_tu( @@ -464,7 +464,7 @@ void test_vloxseg7ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return 
vloxseg7ei32_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_tu( @@ -487,7 +487,7 @@ void test_vloxseg7ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_tu( @@ -510,7 +510,7 @@ void test_vloxseg7ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vloxseg7ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_tu( @@ -533,7 +533,7 @@ void test_vloxseg7ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_tu( @@ -556,7 +556,7 @@ void test_vloxseg7ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_tu( @@ -579,7 +579,7 @@ void test_vloxseg7ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_tu( @@ -602,7 +602,7 @@ void test_vloxseg7ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_tum( @@ -625,7 +625,7 @@ void test_vloxseg7ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_tum( @@ -648,7 +648,7 @@ void test_vloxseg7ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_tum( @@ -671,7 +671,7 @@ void test_vloxseg7ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, 
vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_tum( @@ -694,7 +694,7 @@ void test_vloxseg7ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_tum( @@ -717,7 +717,7 @@ void test_vloxseg7ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, 
vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_tum( @@ -740,7 +740,7 @@ void test_vloxseg7ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_tum( @@ -763,7 +763,7 @@ void test_vloxseg7ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t 
maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_tum( @@ -786,7 +786,7 @@ void test_vloxseg7ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_tum( @@ -809,7 +809,7 @@ void test_vloxseg7ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_tum( @@ -832,7 +832,7 @@ void test_vloxseg7ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_tum( @@ -855,7 +855,7 @@ void test_vloxseg7ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16mf4_tum(v0, v1, v2, 
v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_tum( @@ -878,7 +878,7 @@ void test_vloxseg7ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_tum( @@ -901,7 +901,7 @@ void test_vloxseg7ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg7ei32_v_i32mf2_tum( @@ -924,7 +924,7 @@ void test_vloxseg7ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_tum( @@ -947,7 +947,7 @@ void test_vloxseg7ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_tum( @@ -970,7 +970,7 @@ void test_vloxseg7ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // 
CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_tum( @@ -993,7 +993,7 @@ void test_vloxseg7ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_tum( @@ -1016,7 +1016,7 @@ void test_vloxseg7ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, 
vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_tum( @@ -1039,7 +1039,7 @@ void test_vloxseg7ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_tum( @@ -1062,7 +1062,7 @@ void test_vloxseg7ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, 
vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_tum( @@ -1085,7 +1085,7 @@ void test_vloxseg7ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_tum( @@ -1108,7 +1108,7 @@ void test_vloxseg7ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, 
vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_tum( @@ -1131,7 +1131,7 @@ void test_vloxseg7ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_tum( @@ -1154,7 +1154,7 @@ void test_vloxseg7ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_tum( @@ -1177,7 +1177,7 @@ void test_vloxseg7ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_tum( @@ -1200,7 +1200,7 @@ void test_vloxseg7ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vloxseg7ei32_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_tumu( @@ -1223,7 +1223,7 @@ void test_vloxseg7ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_tumu( @@ -1246,7 +1246,7 @@ void test_vloxseg7ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_tumu( @@ -1269,7 +1269,7 @@ void test_vloxseg7ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_tumu( @@ -1292,7 +1292,7 @@ void test_vloxseg7ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_tumu( @@ -1315,7 +1315,7 @@ void test_vloxseg7ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_tumu( @@ -1338,7 +1338,7 @@ void test_vloxseg7ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_tumu( @@ -1361,7 
+1361,7 @@ void test_vloxseg7ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_tumu( @@ -1384,7 +1384,7 @@ void test_vloxseg7ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_tumu( @@ -1407,7 +1407,7 @@ void test_vloxseg7ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_tumu( @@ -1430,7 +1430,7 @@ void test_vloxseg7ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_tumu( @@ -1453,7 +1453,7 @@ void test_vloxseg7ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, 
vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_tumu( @@ -1476,7 +1476,7 @@ void test_vloxseg7ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_tumu( @@ -1499,7 +1499,7 @@ void test_vloxseg7ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t 
maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_tumu( @@ -1522,7 +1522,7 @@ void test_vloxseg7ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_tumu( @@ -1545,7 +1545,7 @@ void test_vloxseg7ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i32m1_tumu(v0, v1, v2, 
v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_tumu( @@ -1568,7 +1568,7 @@ void test_vloxseg7ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_tumu( @@ -1591,7 +1591,7 @@ void test_vloxseg7ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vloxseg7ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_tumu( @@ -1614,7 +1614,7 @@ void test_vloxseg7ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_tumu( @@ -1637,7 +1637,7 @@ void test_vloxseg7ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_tumu( @@ -1660,7 +1660,7 @@ void test_vloxseg7ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_tumu( @@ -1683,7 +1683,7 @@ void test_vloxseg7ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_tumu( @@ -1706,7 +1706,7 @@ void 
test_vloxseg7ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_tumu( @@ -1729,7 +1729,7 @@ void test_vloxseg7ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_tumu( @@ -1752,7 +1752,7 @@ void test_vloxseg7ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // 
void test_vloxseg7ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_tumu( @@ -1775,7 +1775,7 @@ void test_vloxseg7ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_tumu( @@ -1798,7 +1798,7 @@ void test_vloxseg7ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, 
vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_mu( @@ -1821,7 +1821,7 @@ void test_vloxseg7ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_mu( @@ -1844,7 +1844,7 @@ void test_vloxseg7ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t 
maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_mu( @@ -1867,7 +1867,7 @@ void test_vloxseg7ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_mu( @@ -1890,7 +1890,7 @@ void test_vloxseg7ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, 
vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_mu( @@ -1913,7 +1913,7 @@ void test_vloxseg7ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_mu( @@ -1936,7 +1936,7 @@ void test_vloxseg7ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t 
bindex, size_t vl) { - return vloxseg7ei32_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_mu( @@ -1959,7 +1959,7 @@ void test_vloxseg7ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_mu( @@ -1982,7 +1982,7 @@ void test_vloxseg7ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_mu( @@ -2005,7 +2005,7 @@ void test_vloxseg7ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_mu( @@ -2028,7 +2028,7 @@ void test_vloxseg7ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_mu( @@ -2051,7 +2051,7 @@ void test_vloxseg7ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_mu( @@ -2074,7 +2074,7 @@ void test_vloxseg7ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_mu( @@ -2097,7 +2097,7 @@ void test_vloxseg7ei32_v_i16mf2_mu(vint16mf2_t *v0, 
vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_mu( @@ -2120,7 +2120,7 @@ void test_vloxseg7ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_mu( @@ -2143,7 +2143,7 @@ void test_vloxseg7ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, 
vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_mu( @@ -2166,7 +2166,7 @@ void test_vloxseg7ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_mu( @@ -2189,7 +2189,7 @@ void test_vloxseg7ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t 
maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_mu( @@ -2212,7 +2212,7 @@ void test_vloxseg7ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_mu( @@ -2235,7 +2235,7 @@ void test_vloxseg7ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - 
return vloxseg7ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_mu( @@ -2258,7 +2258,7 @@ void test_vloxseg7ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_mu( @@ -2281,7 +2281,7 @@ void test_vloxseg7ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_mu( @@ -2304,7 +2304,7 @@ void test_vloxseg7ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_mu( @@ -2327,7 +2327,7 @@ void test_vloxseg7ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_mu( @@ -2350,7 +2350,7 @@ void test_vloxseg7ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_mu( @@ -2373,7 +2373,7 @@ void test_vloxseg7ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg7ei32_v_u64m1_mu( @@ -2396,6 +2396,6 @@ void test_vloxseg7ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei32_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei64.c index bd54233ce614..a6ddec2fc113 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei64.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vloxseg7ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_tu( @@ -50,7 +50,7 @@ void test_vloxseg7ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_tu( @@ -73,7 +73,7 @@ void test_vloxseg7ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_tu( @@ -96,7 +96,7 @@ void test_vloxseg7ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_tu( @@ -119,7 +119,7 @@ void test_vloxseg7ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_tu( @@ -142,7 +142,7 @@ void test_vloxseg7ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_tu( @@ -165,7 +165,7 @@ void test_vloxseg7ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_tu( @@ -188,7 +188,7 @@ void test_vloxseg7ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, 
vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_tu( @@ -211,7 +211,7 @@ void test_vloxseg7ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_tu( @@ -234,7 +234,7 @@ void test_vloxseg7ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_i8m1_tu(v0, v1, v2, v3, v4, v5, 
v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_tu( @@ -257,7 +257,7 @@ void test_vloxseg7ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_tu( @@ -280,7 +280,7 @@ void test_vloxseg7ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_tu( @@ -303,7 +303,7 @@ void test_vloxseg7ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_tu( @@ -326,7 +326,7 @@ void test_vloxseg7ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_tu( @@ -349,7 +349,7 @@ void test_vloxseg7ei64_v_i32mf2_tu(vint32mf2_t 
*v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_tu( @@ -372,7 +372,7 @@ void test_vloxseg7ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_tu( @@ -395,7 +395,7 @@ void test_vloxseg7ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t 
maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_tu( @@ -418,7 +418,7 @@ void test_vloxseg7ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_tu( @@ -441,7 +441,7 @@ void test_vloxseg7ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t 
vl) { - return vloxseg7ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_tu( @@ -464,7 +464,7 @@ void test_vloxseg7ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_tu( @@ -487,7 +487,7 @@ void test_vloxseg7ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vloxseg7ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_tu( @@ -510,7 +510,7 @@ void test_vloxseg7ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_tu( @@ -533,7 +533,7 @@ void test_vloxseg7ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_tu( @@ -556,7 +556,7 @@ void test_vloxseg7ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_tu( @@ -579,7 +579,7 @@ void test_vloxseg7ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_tu( @@ -602,7 +602,7 @@ void test_vloxseg7ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // 
void test_vloxseg7ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_tum( @@ -625,7 +625,7 @@ void test_vloxseg7ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_tum( @@ -648,7 +648,7 @@ void test_vloxseg7ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, 
vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_tum( @@ -671,7 +671,7 @@ void test_vloxseg7ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_tum( @@ -694,7 +694,7 @@ void test_vloxseg7ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, 
vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_tum( @@ -717,7 +717,7 @@ void test_vloxseg7ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_tum( @@ -740,7 +740,7 @@ void test_vloxseg7ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, 
vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_tum( @@ -763,7 +763,7 @@ void test_vloxseg7ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_tum( @@ -786,7 +786,7 @@ void test_vloxseg7ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_tum( @@ -809,7 +809,7 @@ void test_vloxseg7ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_tum( @@ -832,7 +832,7 @@ void test_vloxseg7ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_tum( @@ -855,7 +855,7 @@ void test_vloxseg7ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_tum( @@ -878,7 +878,7 @@ void test_vloxseg7ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg7ei64_v_i16m1_tum( @@ -901,7 +901,7 @@ void test_vloxseg7ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_tum( @@ -924,7 +924,7 @@ void test_vloxseg7ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_tum( @@ -947,7 +947,7 @@ void test_vloxseg7ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // 
CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_tum( @@ -970,7 +970,7 @@ void test_vloxseg7ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_tum( @@ -993,7 +993,7 @@ void test_vloxseg7ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, 
vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_tum( @@ -1016,7 +1016,7 @@ void test_vloxseg7ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_tum( @@ -1039,7 +1039,7 @@ void test_vloxseg7ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, 
vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_tum( @@ -1062,7 +1062,7 @@ void test_vloxseg7ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_tum( @@ -1085,7 +1085,7 @@ void test_vloxseg7ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return 
vloxseg7ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_tum( @@ -1108,7 +1108,7 @@ void test_vloxseg7ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_tum( @@ -1131,7 +1131,7 @@ void test_vloxseg7ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_tum( @@ -1154,7 +1154,7 @@ void test_vloxseg7ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_tum( @@ -1177,7 +1177,7 @@ void test_vloxseg7ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u32m1_tum(v0, v1, v2, v3, 
v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_tum( @@ -1200,7 +1200,7 @@ void test_vloxseg7ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_tumu( @@ -1223,7 +1223,7 @@ void test_vloxseg7ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_tumu( @@ -1246,7 +1246,7 @@ void test_vloxseg7ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_tumu( @@ -1269,7 +1269,7 @@ void test_vloxseg7ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg7ei64_v_f32mf2_tumu( @@ -1292,7 +1292,7 @@ void test_vloxseg7ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_tumu( @@ -1315,7 +1315,7 @@ void test_vloxseg7ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_tumu( @@ -1338,7 +1338,7 @@ void 
test_vloxseg7ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_tumu( @@ -1361,7 +1361,7 @@ void test_vloxseg7ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_tumu( @@ -1384,7 +1384,7 @@ void test_vloxseg7ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_tumu( @@ -1407,7 +1407,7 @@ void test_vloxseg7ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_tumu( @@ -1430,7 +1430,7 @@ void test_vloxseg7ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, 
vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_tumu( @@ -1453,7 +1453,7 @@ void test_vloxseg7ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_tumu( @@ -1476,7 +1476,7 @@ void test_vloxseg7ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t 
maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_tumu( @@ -1499,7 +1499,7 @@ void test_vloxseg7ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_tumu( @@ -1522,7 +1522,7 @@ void test_vloxseg7ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i32mf2_tumu(v0, v1, v2, 
v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_tumu( @@ -1545,7 +1545,7 @@ void test_vloxseg7ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_tumu( @@ -1568,7 +1568,7 @@ void test_vloxseg7ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vloxseg7ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_tumu( @@ -1591,7 +1591,7 @@ void test_vloxseg7ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_tumu( @@ -1614,7 +1614,7 @@ void test_vloxseg7ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_tumu( @@ -1637,7 +1637,7 @@ void test_vloxseg7ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_tumu( @@ -1660,7 +1660,7 @@ void test_vloxseg7ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_tumu( @@ -1683,7 +1683,7 @@ void 
test_vloxseg7ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_tumu( @@ -1706,7 +1706,7 @@ void test_vloxseg7ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_tumu( @@ -1729,7 +1729,7 @@ void test_vloxseg7ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_tumu( @@ -1752,7 +1752,7 @@ void test_vloxseg7ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_tumu( @@ -1775,7 +1775,7 @@ void test_vloxseg7ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_tumu( @@ -1798,7 +1798,7 @@ void test_vloxseg7ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_mu( @@ -1821,7 +1821,7 @@ void test_vloxseg7ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, 
vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_mu( @@ -1844,7 +1844,7 @@ void test_vloxseg7ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_mu( @@ -1867,7 +1867,7 @@ void test_vloxseg7ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, 
vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_mu( @@ -1890,7 +1890,7 @@ void test_vloxseg7ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_mu( @@ -1913,7 +1913,7 @@ void test_vloxseg7ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t 
maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_mu( @@ -1936,7 +1936,7 @@ void test_vloxseg7ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_mu( @@ -1959,7 +1959,7 @@ void test_vloxseg7ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_mu( @@ -1982,7 +1982,7 @@ void test_vloxseg7ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_mu( @@ -2005,7 +2005,7 @@ void test_vloxseg7ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_mu( @@ -2028,7 +2028,7 @@ void test_vloxseg7ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_mu( @@ -2051,7 +2051,7 @@ void test_vloxseg7ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_mu( @@ -2074,7 
+2074,7 @@ void test_vloxseg7ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_mu( @@ -2097,7 +2097,7 @@ void test_vloxseg7ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_mu( @@ -2120,7 +2120,7 @@ void test_vloxseg7ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_mu( @@ -2143,7 +2143,7 @@ void test_vloxseg7ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_mu( @@ -2166,7 +2166,7 @@ void test_vloxseg7ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t 
mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_mu( @@ -2189,7 +2189,7 @@ void test_vloxseg7ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_mu( @@ -2212,7 +2212,7 @@ void test_vloxseg7ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, 
vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_mu( @@ -2235,7 +2235,7 @@ void test_vloxseg7ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_mu( @@ -2258,7 +2258,7 @@ void test_vloxseg7ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg7ei64_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_mu( @@ -2281,7 +2281,7 @@ void test_vloxseg7ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_mu( @@ -2304,7 +2304,7 @@ void test_vloxseg7ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vloxseg7ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_mu( @@ -2327,7 +2327,7 @@ void test_vloxseg7ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_mu( @@ -2350,7 +2350,7 @@ void test_vloxseg7ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_mu( @@ -2373,7 +2373,7 @@ void test_vloxseg7ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_mu( @@ -2396,6 +2396,6 @@ void test_vloxseg7ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei8.c index 703089241448..d4563a0515cf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg7ei8.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_tu( @@ -50,7 +50,7 @@ void test_vloxseg7ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vloxseg7ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_tu( @@ -73,7 +73,7 @@ void test_vloxseg7ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_tu( @@ -96,7 +96,7 @@ void test_vloxseg7ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_tu( @@ -119,7 +119,7 @@ void test_vloxseg7ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_tu( @@ -142,7 +142,7 @@ void test_vloxseg7ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_tu( @@ -165,7 +165,7 @@ void test_vloxseg7ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_tu( @@ -188,7 +188,7 @@ void test_vloxseg7ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_tu( @@ -211,7 +211,7 @@ void test_vloxseg7ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, 
vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_tu( @@ -234,7 +234,7 @@ void test_vloxseg7ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_tu( @@ -257,7 +257,7 @@ void test_vloxseg7ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_tu( @@ -280,7 +280,7 @@ void test_vloxseg7ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_tu( @@ -303,7 +303,7 @@ void test_vloxseg7ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_tu( @@ -326,7 +326,7 @@ void test_vloxseg7ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_tu( @@ -349,7 +349,7 @@ void test_vloxseg7ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_tu( @@ -372,7 +372,7 @@ void test_vloxseg7ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_tu( @@ -395,7 +395,7 @@ void test_vloxseg7ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_tu( @@ -418,7 +418,7 @@ void test_vloxseg7ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, 
vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_tu( @@ -441,7 +441,7 @@ void test_vloxseg7ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_tu( @@ -464,7 +464,7 @@ void test_vloxseg7ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_tu( @@ -487,7 +487,7 @@ void test_vloxseg7ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_tu( @@ -510,7 +510,7 @@ void test_vloxseg7ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_tu( @@ -533,7 +533,7 @@ void test_vloxseg7ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_tu( @@ -556,7 +556,7 @@ void test_vloxseg7ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_tu( @@ -579,7 +579,7 @@ void 
test_vloxseg7ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_tu( @@ -602,7 +602,7 @@ void test_vloxseg7ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_tum( @@ -625,7 +625,7 @@ void test_vloxseg7ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, 
vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_tum( @@ -648,7 +648,7 @@ void test_vloxseg7ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_tum( @@ -671,7 +671,7 @@ void test_vloxseg7ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t 
mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_tum( @@ -694,7 +694,7 @@ void test_vloxseg7ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_tum( @@ -717,7 +717,7 @@ void test_vloxseg7ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t 
maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_tum( @@ -740,7 +740,7 @@ void test_vloxseg7ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_tum( @@ -763,7 +763,7 @@ void test_vloxseg7ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return 
vloxseg7ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_tum( @@ -786,7 +786,7 @@ void test_vloxseg7ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_tum( @@ -809,7 +809,7 @@ void test_vloxseg7ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vloxseg7ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_tum( @@ -832,7 +832,7 @@ void test_vloxseg7ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_tum( @@ -855,7 +855,7 @@ void test_vloxseg7ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_tum( @@ -878,7 +878,7 @@ void test_vloxseg7ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_tum( @@ -901,7 +901,7 @@ void test_vloxseg7ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_tum( @@ -924,7 +924,7 @@ void test_vloxseg7ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_tum( @@ -947,7 +947,7 @@ void test_vloxseg7ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_tum( @@ -970,7 +970,7 @@ void test_vloxseg7ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, 
vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_tum( @@ -993,7 +993,7 @@ void test_vloxseg7ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_tum( @@ -1016,7 +1016,7 @@ void test_vloxseg7ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, 
vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_tum( @@ -1039,7 +1039,7 @@ void test_vloxseg7ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_tum( @@ -1062,7 +1062,7 @@ void test_vloxseg7ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_tum( @@ -1085,7 +1085,7 @@ void test_vloxseg7ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_tum( @@ -1108,7 +1108,7 @@ void test_vloxseg7ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, 
vl); + return __riscv_vloxseg7ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_tum( @@ -1131,7 +1131,7 @@ void test_vloxseg7ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_tum( @@ -1154,7 +1154,7 @@ void test_vloxseg7ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_tum( @@ -1177,7 +1177,7 @@ void test_vloxseg7ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_tum( @@ -1200,7 +1200,7 @@ void test_vloxseg7ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_tumu( @@ -1223,7 +1223,7 
@@ void test_vloxseg7ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_tumu( @@ -1246,7 +1246,7 @@ void test_vloxseg7ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_tumu( @@ -1269,7 +1269,7 @@ void test_vloxseg7ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t 
*v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_tumu( @@ -1292,7 +1292,7 @@ void test_vloxseg7ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_tumu( @@ -1315,7 +1315,7 @@ void test_vloxseg7ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_tumu( @@ -1338,7 +1338,7 @@ void test_vloxseg7ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_tumu( @@ -1361,7 +1361,7 @@ void test_vloxseg7ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, 
vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_tumu( @@ -1384,7 +1384,7 @@ void test_vloxseg7ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_tumu( @@ -1407,7 +1407,7 @@ void test_vloxseg7ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, 
vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_tumu( @@ -1430,7 +1430,7 @@ void test_vloxseg7ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_tumu( @@ -1453,7 +1453,7 @@ void test_vloxseg7ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_tumu( @@ -1476,7 +1476,7 @@ void test_vloxseg7ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_tumu( @@ -1499,7 +1499,7 @@ void test_vloxseg7ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vloxseg7ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_tumu( @@ -1522,7 +1522,7 @@ void test_vloxseg7ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_tumu( @@ -1545,7 +1545,7 @@ void test_vloxseg7ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_tumu( @@ -1568,7 +1568,7 @@ void test_vloxseg7ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_tumu( @@ -1591,7 +1591,7 @@ void test_vloxseg7ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_tumu( @@ -1614,7 +1614,7 @@ void 
test_vloxseg7ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_tumu( @@ -1637,7 +1637,7 @@ void test_vloxseg7ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_tumu( @@ -1660,7 +1660,7 @@ void test_vloxseg7ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vloxseg7ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_tumu( @@ -1683,7 +1683,7 @@ void test_vloxseg7ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_tumu( @@ -1706,7 +1706,7 @@ void test_vloxseg7ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t 
*v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_tumu( @@ -1729,7 +1729,7 @@ void test_vloxseg7ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_tumu( @@ -1752,7 +1752,7 @@ void test_vloxseg7ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, 
vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_tumu( @@ -1775,7 +1775,7 @@ void test_vloxseg7ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_tumu( @@ -1798,7 +1798,7 @@ void test_vloxseg7ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, 
vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_mu( @@ -1821,7 +1821,7 @@ void test_vloxseg7ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_mu( @@ -1844,7 +1844,7 @@ void test_vloxseg7ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf2_mu(v0, v1, v2, v3, 
v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_mu( @@ -1867,7 +1867,7 @@ void test_vloxseg7ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_mu( @@ -1890,7 +1890,7 @@ void test_vloxseg7ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
base, bindex, vl); + return __riscv_vloxseg7ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_mu( @@ -1913,7 +1913,7 @@ void test_vloxseg7ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_mu( @@ -1936,7 +1936,7 @@ void test_vloxseg7ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_mu( @@ -1959,7 +1959,7 @@ void test_vloxseg7ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_mu( @@ -1982,7 +1982,7 @@ void test_vloxseg7ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_mu( @@ -2005,7 +2005,7 @@ void 
test_vloxseg7ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_mu( @@ -2028,7 +2028,7 @@ void test_vloxseg7ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_mu( @@ -2051,7 +2051,7 @@ void test_vloxseg7ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, 
vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_mu( @@ -2074,7 +2074,7 @@ void test_vloxseg7ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_mu( @@ -2097,7 +2097,7 @@ void test_vloxseg7ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, 
vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_mu( @@ -2120,7 +2120,7 @@ void test_vloxseg7ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_mu( @@ -2143,7 +2143,7 @@ void test_vloxseg7ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return 
vloxseg7ei8_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_mu( @@ -2166,7 +2166,7 @@ void test_vloxseg7ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_mu( @@ -2189,7 +2189,7 @@ void test_vloxseg7ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + 
return __riscv_vloxseg7ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_mu( @@ -2212,7 +2212,7 @@ void test_vloxseg7ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_mu( @@ -2235,7 +2235,7 @@ void test_vloxseg7ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_mu( @@ -2258,7 +2258,7 @@ void test_vloxseg7ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_mu( @@ -2281,7 +2281,7 @@ void test_vloxseg7ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_mu( @@ -2304,7 +2304,7 @@ void test_vloxseg7ei8_v_u16mf4_mu(vuint16mf4_t 
*v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_mu( @@ -2327,7 +2327,7 @@ void test_vloxseg7ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_mu( @@ -2350,7 +2350,7 @@ void test_vloxseg7ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t 
*v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_mu( @@ -2373,7 +2373,7 @@ void test_vloxseg7ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_mu( @@ -2396,6 +2396,6 @@ void test_vloxseg7ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg7ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, 
vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vloxseg7ei8_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei16.c index d431c59e5415..bd410617870b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei16.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_tu( @@ -54,7 +54,7 @@ void test_vloxseg8ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, 
vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_tu( @@ -79,7 +79,7 @@ void test_vloxseg8ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_tu( @@ -104,7 +104,7 @@ void test_vloxseg8ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, 
vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_tu( @@ -129,7 +129,7 @@ void test_vloxseg8ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_tu( @@ -154,7 +154,7 @@ void test_vloxseg8ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, 
vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_tu( @@ -179,7 +179,7 @@ void test_vloxseg8ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_tu( @@ -204,7 +204,7 @@ void test_vloxseg8ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg8ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_tu( @@ -229,7 +229,7 @@ void test_vloxseg8ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_tu( @@ -254,7 +254,7 @@ void test_vloxseg8ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, 
vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_tu( @@ -279,7 +279,7 @@ void test_vloxseg8ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_tu( @@ -304,7 +304,7 @@ void test_vloxseg8ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, 
vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_tu( @@ -329,7 +329,7 @@ void test_vloxseg8ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_tu( @@ -354,7 +354,7 @@ void test_vloxseg8ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t 
maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_tu( @@ -379,7 +379,7 @@ void test_vloxseg8ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_tu( @@ -404,7 +404,7 @@ void test_vloxseg8ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, 
vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_tu( @@ -429,7 +429,7 @@ void test_vloxseg8ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_tu( @@ -454,7 +454,7 @@ void test_vloxseg8ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const 
uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_tu( @@ -479,7 +479,7 @@ void test_vloxseg8ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_tu( @@ -504,7 +504,7 @@ void test_vloxseg8ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return 
vloxseg8ei16_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_tu( @@ -529,7 +529,7 @@ void test_vloxseg8ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_tu( @@ -554,7 +554,7 @@ void test_vloxseg8ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return 
vloxseg8ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_tu( @@ -579,7 +579,7 @@ void test_vloxseg8ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_tu( @@ -604,7 +604,7 @@ void test_vloxseg8ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u32mf2_tu(v0, v1, 
v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_tu( @@ -629,7 +629,7 @@ void test_vloxseg8ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_tu( @@ -654,7 +654,7 @@ void test_vloxseg8ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_tum( @@ -679,7 +679,7 @@ void test_vloxseg8ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_tum( @@ -704,7 +704,7 @@ void test_vloxseg8ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return 
vloxseg8ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_tum( @@ -729,7 +729,7 @@ void test_vloxseg8ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_tum( @@ -754,7 +754,7 @@ void test_vloxseg8ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, 
const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_tum( @@ -779,7 +779,7 @@ void test_vloxseg8ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_tum( @@ -804,7 +804,7 @@ void test_vloxseg8ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, 
vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_tum( @@ -829,7 +829,7 @@ void test_vloxseg8ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_tum( @@ -854,7 +854,7 @@ void test_vloxseg8ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t 
maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_tum( @@ -879,7 +879,7 @@ void test_vloxseg8ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_tum( @@ -904,7 +904,7 @@ void test_vloxseg8ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const 
int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_tum( @@ -929,7 +929,7 @@ void test_vloxseg8ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_tum( @@ -954,7 +954,7 @@ void test_vloxseg8ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t 
maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_tum( @@ -979,7 +979,7 @@ void test_vloxseg8ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_tum( @@ -1004,7 +1004,7 @@ void test_vloxseg8ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t 
maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_tum( @@ -1029,7 +1029,7 @@ void test_vloxseg8ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_tum( @@ -1054,7 +1054,7 @@ void test_vloxseg8ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const 
int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_tum( @@ -1079,7 +1079,7 @@ void test_vloxseg8ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_tum( @@ -1104,7 +1104,7 @@ void test_vloxseg8ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t 
maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_tum( @@ -1129,7 +1129,7 @@ void test_vloxseg8ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_tum( @@ -1154,7 +1154,7 @@ void test_vloxseg8ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t 
maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_tum( @@ -1179,7 +1179,7 @@ void test_vloxseg8ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_tum( @@ -1204,7 +1204,7 @@ void test_vloxseg8ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t 
maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_tum( @@ -1229,7 +1229,7 @@ void test_vloxseg8ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_tum( @@ -1254,7 +1254,7 @@ void test_vloxseg8ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, 
vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_tum( @@ -1279,7 +1279,7 @@ void test_vloxseg8ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_tum( @@ -1304,7 +1304,7 @@ void test_vloxseg8ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, 
vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_tumu( @@ -1329,7 +1329,7 @@ void test_vloxseg8ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vloxseg8ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t 
mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_tumu( @@ -1379,7 +1379,7 @@ void test_vloxseg8ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_tumu( @@ -1404,7 +1404,7 @@ void test_vloxseg8ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, 
vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_tumu( @@ -1429,7 +1429,7 @@ void test_vloxseg8ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_tumu( @@ -1454,7 +1454,7 @@ void test_vloxseg8ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void 
test_vloxseg8ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_tumu( @@ -1479,7 +1479,7 @@ void test_vloxseg8ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_tumu( @@ -1504,7 +1504,7 @@ void test_vloxseg8ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // 
CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_tumu( @@ -1529,7 +1529,7 @@ void test_vloxseg8ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_tumu( @@ -1554,7 +1554,7 @@ void test_vloxseg8ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // 
CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_tumu( @@ -1579,7 +1579,7 @@ void test_vloxseg8ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_tumu( @@ -1604,7 +1604,7 @@ void test_vloxseg8ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // 
CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_tumu( @@ -1629,7 +1629,7 @@ void test_vloxseg8ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_tumu( @@ -1654,7 +1654,7 @@ void test_vloxseg8ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_tumu( @@ -1679,7 +1679,7 @@ void test_vloxseg8ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_tumu( @@ -1704,7 +1704,7 @@ void test_vloxseg8ei16_v_i32m1_tumu(vint32m1_t *v0, 
vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_tumu( @@ -1729,7 +1729,7 @@ void test_vloxseg8ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_tumu( @@ -1754,7 +1754,7 @@ void test_vloxseg8ei16_v_u8mf8_tumu(vuint8mf8_t 
*v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_tumu( @@ -1779,7 +1779,7 @@ void test_vloxseg8ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_tumu( @@ -1804,7 +1804,7 @@ void 
test_vloxseg8ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_tumu( @@ -1829,7 +1829,7 @@ void test_vloxseg8ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_tumu( @@ 
-1854,7 +1854,7 @@ void test_vloxseg8ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_tumu( @@ -1879,7 +1879,7 @@ void test_vloxseg8ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_tumu( @@ -1904,7 +1904,7 @@ void test_vloxseg8ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_tumu( @@ -1929,7 +1929,7 @@ void test_vloxseg8ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_tumu( @@ -1954,7 +1954,7 @@ void test_vloxseg8ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_mu( @@ -1979,7 +1979,7 @@ void test_vloxseg8ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_mu( @@ -2004,7 +2004,7 @@ void test_vloxseg8ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_mu( @@ -2029,7 +2029,7 @@ void test_vloxseg8ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vloxseg8ei16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_mu( @@ -2054,7 +2054,7 @@ void test_vloxseg8ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_mu( @@ -2079,7 +2079,7 @@ void test_vloxseg8ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_mu( @@ -2104,7 +2104,7 @@ void test_vloxseg8ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_mu( @@ -2129,7 +2129,7 @@ void test_vloxseg8ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_mu( @@ -2154,7 +2154,7 @@ void test_vloxseg8ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_mu( @@ -2179,7 +2179,7 @@ void test_vloxseg8ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_mu( @@ -2204,7 +2204,7 @@ void test_vloxseg8ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_mu( @@ -2229,7 +2229,7 @@ void test_vloxseg8ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); 
+ return __riscv_vloxseg8ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_mu( @@ -2254,7 +2254,7 @@ void test_vloxseg8ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_mu( @@ -2279,7 +2279,7 @@ void test_vloxseg8ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + 
return __riscv_vloxseg8ei16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_mu( @@ -2304,7 +2304,7 @@ void test_vloxseg8ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_mu( @@ -2329,7 +2329,7 @@ void test_vloxseg8ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vloxseg8ei16_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_mu( @@ -2354,7 +2354,7 @@ void test_vloxseg8ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_mu( @@ -2379,7 +2379,7 @@ void test_vloxseg8ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vloxseg8ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_mu( @@ -2404,7 +2404,7 @@ void test_vloxseg8ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_mu( @@ -2429,7 +2429,7 @@ void test_vloxseg8ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + 
return __riscv_vloxseg8ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_mu( @@ -2454,7 +2454,7 @@ void test_vloxseg8ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_mu( @@ -2479,7 +2479,7 @@ void test_vloxseg8ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); 
+ return __riscv_vloxseg8ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_mu( @@ -2504,7 +2504,7 @@ void test_vloxseg8ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_mu( @@ -2529,7 +2529,7 @@ void test_vloxseg8ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_mu( @@ -2554,7 +2554,7 @@ void test_vloxseg8ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_mu( @@ -2579,7 +2579,7 @@ void test_vloxseg8ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_mu( @@ -2604,6 +2604,6 @@ void test_vloxseg8ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei16_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei32.c index d084443b453f..dbeb7465b647 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei32.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t 
maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_tu( @@ -54,7 +54,7 @@ void test_vloxseg8ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_tu( @@ -79,7 +79,7 @@ void test_vloxseg8ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t 
maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_tu( @@ -104,7 +104,7 @@ void test_vloxseg8ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_tu( @@ -129,7 +129,7 @@ void test_vloxseg8ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t 
maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_tu( @@ -154,7 +154,7 @@ void test_vloxseg8ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_tu( @@ -179,7 +179,7 @@ void test_vloxseg8ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t 
maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_tu( @@ -204,7 +204,7 @@ void test_vloxseg8ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_tu( @@ -229,7 +229,7 @@ void test_vloxseg8ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const 
int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_tu( @@ -254,7 +254,7 @@ void test_vloxseg8ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_tu( @@ -279,7 +279,7 @@ void test_vloxseg8ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i16mf4_tu(v0, v1, 
v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_tu( @@ -304,7 +304,7 @@ void test_vloxseg8ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_tu( @@ -329,7 +329,7 @@ void test_vloxseg8ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_tu( @@ -354,7 +354,7 @@ void test_vloxseg8ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_tu( @@ -379,7 +379,7 @@ void test_vloxseg8ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, 
bindex, vl); + return __riscv_vloxseg8ei32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_tu( @@ -404,7 +404,7 @@ void test_vloxseg8ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_tu( @@ -429,7 +429,7 @@ void test_vloxseg8ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf8_tu(v0, v1, v2, v3, 
v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_tu( @@ -454,7 +454,7 @@ void test_vloxseg8ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_tu( @@ -479,7 +479,7 @@ void test_vloxseg8ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_tu( @@ -504,7 +504,7 @@ void test_vloxseg8ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_tu( @@ -529,7 +529,7 @@ void test_vloxseg8ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_tu( @@ -554,7 +554,7 @@ void test_vloxseg8ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_tu( @@ -579,7 +579,7 @@ void test_vloxseg8ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_tu( @@ -604,7 +604,7 @@ void test_vloxseg8ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_tu( @@ -629,7 +629,7 @@ void test_vloxseg8ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei32_v_u64m1_tu( @@ -654,7 +654,7 @@ void test_vloxseg8ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_tum( @@ -679,7 +679,7 @@ void test_vloxseg8ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_tum( @@ -704,7 +704,7 @@ void test_vloxseg8ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_tum( @@ -729,7 +729,7 @@ void test_vloxseg8ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_tum( @@ -754,7 +754,7 @@ void test_vloxseg8ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_tum( @@ -779,7 +779,7 @@ void test_vloxseg8ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_tum( @@ -804,7 +804,7 @@ void test_vloxseg8ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_tum( @@ -829,7 +829,7 @@ void test_vloxseg8ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_tum( @@ -854,7 +854,7 @@ void test_vloxseg8ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_tum( @@ -879,7 +879,7 @@ void test_vloxseg8ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_tum( @@ -904,7 +904,7 @@ void test_vloxseg8ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_tum( @@ -929,7 +929,7 @@ void test_vloxseg8ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_tum( @@ -954,7 +954,7 @@ void test_vloxseg8ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_tum( @@ -979,7 +979,7 @@ void test_vloxseg8ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_tum( @@ -1004,7 +1004,7 @@ void test_vloxseg8ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_tum( @@ -1029,7 +1029,7 @@ void test_vloxseg8ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_tum( @@ -1054,7 +1054,7 @@ void test_vloxseg8ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_tum( @@ -1079,7 +1079,7 @@ void test_vloxseg8ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_tum( @@ -1104,7 +1104,7 @@ void test_vloxseg8ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_tum( @@ -1129,7 +1129,7 @@ void test_vloxseg8ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_tum( @@ -1154,7 +1154,7 @@ void test_vloxseg8ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_tum( @@ -1179,7 +1179,7 @@ void test_vloxseg8ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_tum( @@ -1204,7 +1204,7 @@ void test_vloxseg8ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_tum( @@ -1229,7 +1229,7 @@ void test_vloxseg8ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16m1_tum(v0, v1, v2, v3, 
v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_tum( @@ -1254,7 +1254,7 @@ void test_vloxseg8ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_tum( @@ -1279,7 +1279,7 @@ void test_vloxseg8ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vloxseg8ei32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_tum( @@ -1304,7 +1304,7 @@ void test_vloxseg8ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_tumu( @@ -1329,7 +1329,7 @@ void test_vloxseg8ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vloxseg8ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_tumu( @@ -1379,7 +1379,7 @@ void test_vloxseg8ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_tumu( @@ -1404,7 +1404,7 @@ void test_vloxseg8ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_tumu( @@ -1429,7 +1429,7 @@ void test_vloxseg8ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { 
- return vloxseg8ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_tumu( @@ -1454,7 +1454,7 @@ void test_vloxseg8ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_tumu( @@ -1479,7 +1479,7 @@ void test_vloxseg8ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t 
bindex, size_t vl) { - return vloxseg8ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_tumu( @@ -1504,7 +1504,7 @@ void test_vloxseg8ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_tumu( @@ -1529,7 +1529,7 @@ void test_vloxseg8ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t 
vl) { - return vloxseg8ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_tumu( @@ -1554,7 +1554,7 @@ void test_vloxseg8ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_tumu( @@ -1579,7 +1579,7 @@ void test_vloxseg8ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return 
vloxseg8ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_tumu( @@ -1604,7 +1604,7 @@ void test_vloxseg8ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_tumu( @@ -1629,7 +1629,7 @@ void test_vloxseg8ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { 
- return vloxseg8ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_tumu( @@ -1654,7 +1654,7 @@ void test_vloxseg8ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_tumu( @@ -1679,7 +1679,7 @@ void test_vloxseg8ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, 
size_t vl) { - return vloxseg8ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_tumu( @@ -1704,7 +1704,7 @@ void test_vloxseg8ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_tumu( @@ -1729,7 +1729,7 @@ void test_vloxseg8ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t 
bindex, size_t vl) { - return vloxseg8ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_tumu( @@ -1754,7 +1754,7 @@ void test_vloxseg8ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_tumu( @@ -1779,7 +1779,7 @@ void test_vloxseg8ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t 
*base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_tumu( @@ -1804,7 +1804,7 @@ void test_vloxseg8ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_tumu( @@ -1829,7 +1829,7 @@ void test_vloxseg8ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t 
maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_tumu( @@ -1854,7 +1854,7 @@ void test_vloxseg8ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_tumu( @@ -1879,7 +1879,7 @@ void test_vloxseg8ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t 
maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_tumu( @@ -1904,7 +1904,7 @@ void test_vloxseg8ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_tumu( @@ -1929,7 +1929,7 @@ void test_vloxseg8ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t 
maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_tumu( @@ -1954,7 +1954,7 @@ void test_vloxseg8ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_mu( @@ -1979,7 +1979,7 @@ void test_vloxseg8ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t 
maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_mu( @@ -2004,7 +2004,7 @@ void test_vloxseg8ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_mu( @@ -2029,7 +2029,7 @@ void test_vloxseg8ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, 
vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_mu( @@ -2054,7 +2054,7 @@ void test_vloxseg8ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_mu( @@ -2079,7 +2079,7 @@ void test_vloxseg8ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, 
vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_mu( @@ -2104,7 +2104,7 @@ void test_vloxseg8ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_mu( @@ -2129,7 +2129,7 @@ void test_vloxseg8ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, 
vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_mu( @@ -2154,7 +2154,7 @@ void test_vloxseg8ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_mu( @@ -2179,7 +2179,7 @@ void test_vloxseg8ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t 
*v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_mu( @@ -2204,7 +2204,7 @@ void test_vloxseg8ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_mu( @@ -2229,7 +2229,7 @@ void test_vloxseg8ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, 
vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_mu( @@ -2254,7 +2254,7 @@ void test_vloxseg8ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_mu( @@ -2279,7 +2279,7 @@ void test_vloxseg8ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t 
*v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_mu( @@ -2304,7 +2304,7 @@ void test_vloxseg8ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_mu( @@ -2329,7 +2329,7 @@ void test_vloxseg8ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, 
vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_mu( @@ -2354,7 +2354,7 @@ void test_vloxseg8ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_mu( @@ -2379,7 +2379,7 @@ void test_vloxseg8ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, 
vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_mu( @@ -2404,7 +2404,7 @@ void test_vloxseg8ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_mu( @@ -2429,7 +2429,7 @@ void test_vloxseg8ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t 
*v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_mu( @@ -2454,7 +2454,7 @@ void test_vloxseg8ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_mu( @@ -2479,7 +2479,7 @@ void test_vloxseg8ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, 
vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_mu( @@ -2504,7 +2504,7 @@ void test_vloxseg8ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_mu( @@ -2529,7 +2529,7 @@ void test_vloxseg8ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t 
*v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_mu( @@ -2554,7 +2554,7 @@ void test_vloxseg8ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_mu( @@ -2579,7 +2579,7 @@ void test_vloxseg8ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, 
vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_mu( @@ -2604,6 +2604,6 @@ void test_vloxseg8ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei32_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei64.c index 7034521166cf..900bbc827ba5 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei64.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_tu( @@ -54,7 +54,7 @@ void test_vloxseg8ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_tu( @@ -79,7 +79,7 @@ void test_vloxseg8ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_tu( @@ -104,7 +104,7 @@ void test_vloxseg8ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_tu( @@ -129,7 +129,7 @@ void test_vloxseg8ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_tu( @@ -154,7 +154,7 @@ void test_vloxseg8ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_tu( @@ -179,7 +179,7 @@ void test_vloxseg8ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_tu( @@ -204,7 +204,7 @@ void test_vloxseg8ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei64_v_i8mf2_tu( @@ -229,7 +229,7 @@ void test_vloxseg8ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_tu( @@ -254,7 +254,7 @@ void test_vloxseg8ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_tu( @@ -279,7 +279,7 @@ void test_vloxseg8ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t 
*v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_tu( @@ -304,7 +304,7 @@ void test_vloxseg8ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_tu( @@ -329,7 +329,7 @@ void test_vloxseg8ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret 
void // void test_vloxseg8ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_tu( @@ -354,7 +354,7 @@ void test_vloxseg8ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_tu( @@ -379,7 +379,7 @@ void test_vloxseg8ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t 
*v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_tu( @@ -404,7 +404,7 @@ void test_vloxseg8ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_tu( @@ -429,7 +429,7 @@ void test_vloxseg8ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t 
*v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_tu( @@ -454,7 +454,7 @@ void test_vloxseg8ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_tu( @@ -479,7 +479,7 @@ void test_vloxseg8ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t 
maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_tu( @@ -504,7 +504,7 @@ void test_vloxseg8ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_tu( @@ -529,7 +529,7 @@ void test_vloxseg8ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, 
vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_tu( @@ -554,7 +554,7 @@ void test_vloxseg8ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_tu( @@ -579,7 +579,7 @@ void test_vloxseg8ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, 
vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_tu( @@ -604,7 +604,7 @@ void test_vloxseg8ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_tu( @@ -629,7 +629,7 @@ void test_vloxseg8ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t 
maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_tu( @@ -654,7 +654,7 @@ void test_vloxseg8ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_tum( @@ -679,7 +679,7 @@ void test_vloxseg8ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t 
maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_tum( @@ -704,7 +704,7 @@ void test_vloxseg8ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_tum( @@ -729,7 +729,7 @@ void test_vloxseg8ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, 
vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_tum( @@ -754,7 +754,7 @@ void test_vloxseg8ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_tum( @@ -779,7 +779,7 @@ void test_vloxseg8ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t 
maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_tum( @@ -804,7 +804,7 @@ void test_vloxseg8ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_tum( @@ -829,7 +829,7 @@ void test_vloxseg8ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t 
maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_tum( @@ -854,7 +854,7 @@ void test_vloxseg8ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_tum( @@ -879,7 +879,7 @@ void test_vloxseg8ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t 
maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_tum( @@ -904,7 +904,7 @@ void test_vloxseg8ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_tum( @@ -929,7 +929,7 @@ void test_vloxseg8ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, 
vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_tum( @@ -954,7 +954,7 @@ void test_vloxseg8ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_tum( @@ -979,7 +979,7 @@ void test_vloxseg8ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t 
maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_tum( @@ -1004,7 +1004,7 @@ void test_vloxseg8ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_tum( @@ -1029,7 +1029,7 @@ void test_vloxseg8ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t 
maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_tum( @@ -1054,7 +1054,7 @@ void test_vloxseg8ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_tum( @@ -1079,7 +1079,7 @@ void test_vloxseg8ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t 
maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_tum( @@ -1104,7 +1104,7 @@ void test_vloxseg8ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_tum( @@ -1129,7 +1129,7 @@ void test_vloxseg8ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, 
vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_tum( @@ -1154,7 +1154,7 @@ void test_vloxseg8ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_tum( @@ -1179,7 +1179,7 @@ void test_vloxseg8ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t 
maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_tum( @@ -1204,7 +1204,7 @@ void test_vloxseg8ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_tum( @@ -1229,7 +1229,7 @@ void test_vloxseg8ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t 
mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_tum( @@ -1254,7 +1254,7 @@ void test_vloxseg8ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_tum( @@ -1279,7 +1279,7 @@ void test_vloxseg8ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t 
*v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_tum( @@ -1304,7 +1304,7 @@ void test_vloxseg8ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_tumu( @@ -1329,7 +1329,7 @@ void test_vloxseg8ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, 
vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vloxseg8ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_tumu( @@ -1379,7 +1379,7 @@ void test_vloxseg8ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void 
test_vloxseg8ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_tumu( @@ -1404,7 +1404,7 @@ void test_vloxseg8ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_tumu( @@ -1429,7 +1429,7 @@ void 
test_vloxseg8ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_tumu( @@ -1454,7 +1454,7 @@ void test_vloxseg8ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei64_v_i8mf8_tumu( @@ -1479,7 +1479,7 @@ void test_vloxseg8ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_tumu( @@ -1504,7 +1504,7 @@ void test_vloxseg8ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei64_v_i8mf2_tumu( @@ -1529,7 +1529,7 @@ void test_vloxseg8ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_tumu( @@ -1554,7 +1554,7 @@ void test_vloxseg8ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_tumu( @@ 
-1579,7 +1579,7 @@ void test_vloxseg8ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_tumu( @@ -1604,7 +1604,7 @@ void test_vloxseg8ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei64_v_i16m1_tumu( @@ -1629,7 +1629,7 @@ void test_vloxseg8ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_tumu( @@ -1654,7 +1654,7 @@ void test_vloxseg8ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_tumu( @@ -1679,7 +1679,7 @@ void test_vloxseg8ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_tumu( @@ -1704,7 +1704,7 @@ void test_vloxseg8ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_tumu( @@ -1729,7 +1729,7 @@ void test_vloxseg8ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_tumu( @@ -1754,7 +1754,7 @@ void test_vloxseg8ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_tumu( @@ -1779,7 +1779,7 @@ void test_vloxseg8ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_tumu( @@ -1804,7 +1804,7 @@ void test_vloxseg8ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_tumu( @@ -1829,7 +1829,7 @@ void test_vloxseg8ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_tumu( @@ -1854,7 +1854,7 @@ void test_vloxseg8ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_tumu( @@ -1879,7 +1879,7 @@ void test_vloxseg8ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_tumu( @@ -1904,7 +1904,7 @@ void test_vloxseg8ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_tumu( @@ -1929,7 +1929,7 @@ void test_vloxseg8ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_tumu( @@ -1954,7 +1954,7 @@ void test_vloxseg8ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u64m1_tumu(v0, v1, v2, 
v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_mu( @@ -1979,7 +1979,7 @@ void test_vloxseg8ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_mu( @@ -2004,7 +2004,7 @@ void test_vloxseg8ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_mu( @@ -2029,7 +2029,7 @@ void test_vloxseg8ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_mu( @@ -2054,7 +2054,7 @@ void test_vloxseg8ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_mu( @@ -2079,7 +2079,7 @@ void test_vloxseg8ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_mu( @@ -2104,7 +2104,7 @@ void test_vloxseg8ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, 
v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_mu( @@ -2129,7 +2129,7 @@ void test_vloxseg8ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_mu( @@ -2154,7 +2154,7 @@ void test_vloxseg8ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_mu( @@ -2179,7 +2179,7 @@ void test_vloxseg8ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_mu( @@ -2204,7 +2204,7 @@ void test_vloxseg8ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_mu( @@ -2229,7 +2229,7 @@ void test_vloxseg8ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_mu( @@ -2254,7 +2254,7 @@ void test_vloxseg8ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_mu( @@ -2279,7 +2279,7 @@ void test_vloxseg8ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_mu( @@ -2304,7 +2304,7 @@ void test_vloxseg8ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_mu( @@ -2329,7 +2329,7 @@ void test_vloxseg8ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_mu( @@ -2354,7 +2354,7 @@ void test_vloxseg8ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_mu( @@ -2379,7 +2379,7 @@ void test_vloxseg8ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_mu( @@ -2404,7 +2404,7 @@ void test_vloxseg8ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_mu( @@ -2429,7 +2429,7 @@ void test_vloxseg8ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_mu( @@ -2454,7 +2454,7 @@ void test_vloxseg8ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg8ei64_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_mu( @@ -2479,7 +2479,7 @@ void test_vloxseg8ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_mu( @@ -2504,7 +2504,7 @@ void test_vloxseg8ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_mu( @@ -2529,7 +2529,7 @@ void test_vloxseg8ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_mu( @@ -2554,7 +2554,7 @@ void test_vloxseg8ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_mu( @@ -2579,7 +2579,7 @@ void test_vloxseg8ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_mu( @@ -2604,6 +2604,6 @@ void test_vloxseg8ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei8.c index 7db880670d64..323a1e8bdde2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vloxseg8ei8.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_tu( @@ -54,7 +54,7 @@ void test_vloxseg8ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t 
maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_tu( @@ -79,7 +79,7 @@ void test_vloxseg8ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_tu( @@ -104,7 +104,7 @@ void test_vloxseg8ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, 
vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_tu( @@ -129,7 +129,7 @@ void test_vloxseg8ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_tu( @@ -154,7 +154,7 @@ void test_vloxseg8ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, 
vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_tu( @@ -179,7 +179,7 @@ void test_vloxseg8ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_tu( @@ -204,7 +204,7 @@ void test_vloxseg8ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t 
maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_tu( @@ -229,7 +229,7 @@ void test_vloxseg8ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_tu( @@ -254,7 +254,7 @@ void test_vloxseg8ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - 
return vloxseg8ei8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_tu( @@ -279,7 +279,7 @@ void test_vloxseg8ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_tu( @@ -304,7 +304,7 @@ void test_vloxseg8ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_tu( @@ -329,7 +329,7 @@ void test_vloxseg8ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_tu( @@ -354,7 +354,7 @@ void test_vloxseg8ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_tu( @@ -379,7 +379,7 @@ void test_vloxseg8ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_tu( @@ -404,7 +404,7 @@ void test_vloxseg8ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i64m1_tu(v0, v1, v2, 
v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_tu( @@ -429,7 +429,7 @@ void test_vloxseg8ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_tu( @@ -454,7 +454,7 @@ void test_vloxseg8ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_tu( @@ -479,7 +479,7 @@ void test_vloxseg8ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_tu( @@ -504,7 +504,7 @@ void test_vloxseg8ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_tu( @@ -529,7 +529,7 @@ void test_vloxseg8ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_tu( @@ -554,7 +554,7 @@ void test_vloxseg8ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei8_v_u16m1_tu( @@ -579,7 +579,7 @@ void test_vloxseg8ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_tu( @@ -604,7 +604,7 @@ void test_vloxseg8ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_tu( @@ -629,7 
+629,7 @@ void test_vloxseg8ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_tu( @@ -654,7 +654,7 @@ void test_vloxseg8ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_tum( @@ -679,7 +679,7 @@ void test_vloxseg8ei8_v_u64m1_tu(vuint64m1_t *v0, 
vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_tum( @@ -704,7 +704,7 @@ void test_vloxseg8ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei8_v_f16m1_tum( @@ -729,7 +729,7 @@ void test_vloxseg8ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_tum( @@ -754,7 +754,7 @@ void test_vloxseg8ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_tum( @@ -779,7 +779,7 @@ void test_vloxseg8ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_tum( @@ -804,7 +804,7 @@ void test_vloxseg8ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_tum( @@ -829,7 +829,7 @@ void test_vloxseg8ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_tum( @@ -854,7 +854,7 @@ void test_vloxseg8ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_tum( @@ -879,7 +879,7 @@ void test_vloxseg8ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_tum( @@ -904,7 +904,7 @@ void test_vloxseg8ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei8_v_i16mf4_tum( @@ -929,7 +929,7 @@ void test_vloxseg8ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_tum( @@ -954,7 +954,7 @@ void test_vloxseg8ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_tum( @@ -979,7 +979,7 @@ void test_vloxseg8ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_tum( @@ -1004,7 +1004,7 @@ void test_vloxseg8ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_tum( @@ -1029,7 +1029,7 @@ void test_vloxseg8ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_tum( @@ -1054,7 +1054,7 @@ void test_vloxseg8ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei8_v_u8mf8_tum( @@ -1079,7 +1079,7 @@ void test_vloxseg8ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_tum( @@ -1104,7 +1104,7 @@ void test_vloxseg8ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_tum( @@ -1129,7 +1129,7 @@ void test_vloxseg8ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_tum( @@ -1154,7 +1154,7 @@ void test_vloxseg8ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxseg8ei8_v_u16mf4_tum( @@ -1179,7 +1179,7 @@ void test_vloxseg8ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_tum( @@ -1204,7 +1204,7 @@ void test_vloxseg8ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_tum( @@ -1229,7 +1229,7 @@ void test_vloxseg8ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_tum( @@ -1254,7 +1254,7 @@ void test_vloxseg8ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_tum( @@ -1279,7 +1279,7 @@ void test_vloxseg8ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_tum( @@ -1304,7 +1304,7 @@ void test_vloxseg8ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_tumu( @@ -1329,7 +1329,7 @@ void test_vloxseg8ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vloxseg8ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vloxseg8ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_tumu( @@ -1379,7 +1379,7 @@ void test_vloxseg8ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_tumu( @@ -1404,7 +1404,7 @@ void test_vloxseg8ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_tumu( @@ -1429,7 +1429,7 @@ void test_vloxseg8ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_tumu( @@ -1454,7 +1454,7 @@ void test_vloxseg8ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_tumu( @@ -1479,7 +1479,7 @@ void test_vloxseg8ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_tumu( @@ -1504,7 +1504,7 @@ void test_vloxseg8ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_tumu( @@ -1529,7 +1529,7 @@ void test_vloxseg8ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_tumu( @@ -1554,7 +1554,7 @@ void test_vloxseg8ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_tumu( @@ -1579,7 +1579,7 @@ void test_vloxseg8ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_tumu( @@ -1604,7 +1604,7 @@ void test_vloxseg8ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_tumu( @@ -1629,7 +1629,7 @@ void test_vloxseg8ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_tumu( @@ -1654,7 +1654,7 @@ void test_vloxseg8ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_tumu( @@ -1679,7 +1679,7 @@ void test_vloxseg8ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_tumu( @@ -1704,7 +1704,7 @@ void test_vloxseg8ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_tumu( @@ -1729,7 +1729,7 @@ void test_vloxseg8ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_tumu( @@ -1754,7 +1754,7 @@ void test_vloxseg8ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_tumu( @@ -1779,7 +1779,7 @@ void test_vloxseg8ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_tumu( @@ -1804,7 +1804,7 @@ void test_vloxseg8ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_tumu( @@ -1829,7 +1829,7 @@ void test_vloxseg8ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_tumu( @@ -1854,7 +1854,7 @@ void test_vloxseg8ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_tumu( @@ -1879,7 +1879,7 @@ void test_vloxseg8ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_tumu( @@ -1904,7 +1904,7 @@ void test_vloxseg8ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return 
vloxseg8ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_tumu( @@ -1929,7 +1929,7 @@ void test_vloxseg8ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_tumu( @@ -1954,7 +1954,7 @@ void test_vloxseg8ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t 
vl) { - return vloxseg8ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_mu( @@ -1979,7 +1979,7 @@ void test_vloxseg8ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_mu( @@ -2004,7 +2004,7 @@ void test_vloxseg8ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, 
vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_mu( @@ -2029,7 +2029,7 @@ void test_vloxseg8ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_mu( @@ -2054,7 +2054,7 @@ void test_vloxseg8ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, 
vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_mu( @@ -2079,7 +2079,7 @@ void test_vloxseg8ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_mu( @@ -2104,7 +2104,7 @@ void test_vloxseg8ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t 
maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_mu( @@ -2129,7 +2129,7 @@ void test_vloxseg8ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_mu( @@ -2154,7 +2154,7 @@ void test_vloxseg8ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t 
maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_mu( @@ -2179,7 +2179,7 @@ void test_vloxseg8ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_mu( @@ -2204,7 +2204,7 @@ void test_vloxseg8ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, 
vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_mu( @@ -2229,7 +2229,7 @@ void test_vloxseg8ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_mu( @@ -2254,7 +2254,7 @@ void test_vloxseg8ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, 
vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_mu( @@ -2279,7 +2279,7 @@ void test_vloxseg8ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_mu( @@ -2304,7 +2304,7 @@ void test_vloxseg8ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t 
maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_mu( @@ -2329,7 +2329,7 @@ void test_vloxseg8ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_mu( @@ -2354,7 +2354,7 @@ void test_vloxseg8ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, 
vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_mu( @@ -2379,7 +2379,7 @@ void test_vloxseg8ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_mu( @@ -2404,7 +2404,7 @@ void test_vloxseg8ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, 
vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_mu( @@ -2429,7 +2429,7 @@ void test_vloxseg8ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_mu( @@ -2454,7 +2454,7 @@ void test_vloxseg8ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, 
size_t vl) { - return vloxseg8ei8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_mu( @@ -2479,7 +2479,7 @@ void test_vloxseg8ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_mu( @@ -2504,7 +2504,7 @@ void test_vloxseg8ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const 
uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_mu( @@ -2529,7 +2529,7 @@ void test_vloxseg8ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_mu( @@ -2554,7 +2554,7 @@ void test_vloxseg8ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t 
maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_mu( @@ -2579,7 +2579,7 @@ void test_vloxseg8ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_mu( @@ -2604,6 +2604,6 @@ void test_vloxseg8ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vloxseg8ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t 
maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vloxseg8ei8_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse16.c index 1bd8c50ab29c..a829305a0e03 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlse16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16mf4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vlse16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *b // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlse16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16mf2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vlse16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *b // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlse16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m1_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m1_tu(maskedoff, base, bstride, vl); 
} // CHECK-RV64-LABEL: @test_vlse16_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vlse16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlse16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vlse16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlse16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vlse16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlse16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m8_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m8_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vlse16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlse16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16mf4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16mf4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_tu( @@ -76,7 +76,7 @@ vint16mf4_t test_vlse16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlse16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlse16_v_i16mf2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16mf2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m1_tu( @@ -85,7 +85,7 @@ vint16mf2_t test_vlse16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlse16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m1_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m1_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m2_tu( @@ -94,7 +94,7 @@ vint16m1_t test_vlse16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, ptr // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlse16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m4_tu( @@ -103,7 +103,7 @@ vint16m2_t test_vlse16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, ptr // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlse16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m8_tu( @@ -112,7 +112,7 @@ vint16m4_t test_vlse16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, ptr // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlse16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m8_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m8_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_tu( @@ -121,7 +121,7 @@ vint16m8_t test_vlse16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, ptr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vlse16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16mf4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16mf4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_tu( @@ -130,7 +130,7 @@ vuint16mf4_t test_vlse16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *bas // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlse16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16mf2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16mf2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m1_tu( @@ -139,7 +139,7 @@ vuint16mf2_t test_vlse16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *bas // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlse16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m1_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m1_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m2_tu( @@ -148,7 +148,7 @@ vuint16m1_t test_vlse16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlse16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m4_tu( @@ -157,7 +157,7 @@ vuint16m2_t test_vlse16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlse16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m8_tu( @@ -166,7 +166,7 @@ vuint16m4_t 
test_vlse16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlse16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m8_tu(maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m8_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_tum( @@ -175,7 +175,7 @@ vuint16m8_t test_vlse16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlse16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16mf4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_tum( @@ -184,7 +184,7 @@ vfloat16mf4_t test_vlse16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlse16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16mf2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m1_tum( @@ -193,7 +193,7 @@ vfloat16mf2_t test_vlse16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlse16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m1_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m1_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m2_tum( @@ -202,7 +202,7 @@ vfloat16m1_t test_vlse16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlse16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, 
ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m4_tum( @@ -211,7 +211,7 @@ vfloat16m2_t test_vlse16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlse16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m8_tum( @@ -220,7 +220,7 @@ vfloat16m4_t test_vlse16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlse16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m8_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m8_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_tum( @@ -229,7 +229,7 @@ vfloat16m8_t test_vlse16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlse16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16mf4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16mf4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_tum( @@ -238,7 +238,7 @@ vint16mf4_t test_vlse16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlse16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16mf2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16mf2_tum(mask, maskedoff, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlse16_v_i16m1_tum( @@ -247,7 +247,7 @@ vint16mf2_t test_vlse16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlse16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m1_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m1_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m2_tum( @@ -256,7 +256,7 @@ vint16m1_t test_vlse16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlse16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m4_tum( @@ -265,7 +265,7 @@ vint16m2_t test_vlse16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlse16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m8_tum( @@ -274,7 +274,7 @@ vint16m4_t test_vlse16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlse16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m8_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m8_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_tum( @@ -283,7 +283,7 @@ vint16m8_t test_vlse16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vlse16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16mf4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16mf4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_tum( @@ -292,7 +292,7 @@ vuint16mf4_t test_vlse16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlse16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16mf2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16mf2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m1_tum( @@ -301,7 +301,7 @@ vuint16mf2_t test_vlse16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlse16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m1_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m1_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m2_tum( @@ -310,7 +310,7 @@ vuint16m1_t test_vlse16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlse16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m4_tum( @@ -319,7 +319,7 @@ vuint16m2_t test_vlse16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlse16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m4_tum(mask, maskedoff, base, bstride, 
vl); + return __riscv_vlse16_v_u16m4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m8_tum( @@ -328,7 +328,7 @@ vuint16m4_t test_vlse16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlse16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m8_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m8_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_tumu( @@ -337,7 +337,7 @@ vuint16m8_t test_vlse16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlse16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16mf4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_tumu( @@ -346,7 +346,7 @@ vfloat16mf4_t test_vlse16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlse16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16mf2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m1_tumu( @@ -355,7 +355,7 @@ vfloat16mf2_t test_vlse16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlse16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m1_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m1_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m2_tumu( @@ -364,7 +364,7 @@ vfloat16m1_t 
test_vlse16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlse16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m4_tumu( @@ -373,7 +373,7 @@ vfloat16m2_t test_vlse16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlse16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m8_tumu( @@ -382,7 +382,7 @@ vfloat16m4_t test_vlse16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlse16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m8_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m8_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_tumu( @@ -391,7 +391,7 @@ vfloat16m8_t test_vlse16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlse16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16mf4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16mf4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_tumu( @@ -400,7 +400,7 @@ vint16mf4_t test_vlse16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlse16_v_i16mf2_tumu(vbool32_t mask, 
vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16mf2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16mf2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m1_tumu( @@ -409,7 +409,7 @@ vint16mf2_t test_vlse16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlse16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m1_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m1_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m2_tumu( @@ -418,7 +418,7 @@ vint16m1_t test_vlse16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlse16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m4_tumu( @@ -427,7 +427,7 @@ vint16m2_t test_vlse16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlse16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m8_tumu( @@ -436,7 +436,7 @@ vint16m4_t test_vlse16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlse16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m8_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m8_tumu(mask, 
maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_tumu( @@ -445,7 +445,7 @@ vint16m8_t test_vlse16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlse16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16mf4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16mf4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_tumu( @@ -454,7 +454,7 @@ vuint16mf4_t test_vlse16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlse16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16mf2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16mf2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m1_tumu( @@ -463,7 +463,7 @@ vuint16mf2_t test_vlse16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlse16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m1_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m1_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m2_tumu( @@ -472,7 +472,7 @@ vuint16m1_t test_vlse16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlse16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m4_tumu( @@ -481,7 +481,7 @@ vuint16m2_t test_vlse16_v_u16m2_tumu(vbool8_t mask, 
vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlse16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m8_tumu( @@ -490,7 +490,7 @@ vuint16m4_t test_vlse16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlse16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m8_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m8_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_mu( @@ -499,7 +499,7 @@ vuint16m8_t test_vlse16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlse16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16mf4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_mu( @@ -508,7 +508,7 @@ vfloat16mf4_t test_vlse16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlse16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16mf2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m1_mu( @@ -517,7 +517,7 @@ vfloat16mf2_t test_vlse16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlse16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t 
bstride, size_t vl) { - return vlse16_v_f16m1_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m1_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m2_mu( @@ -526,7 +526,7 @@ vfloat16m1_t test_vlse16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlse16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m4_mu( @@ -535,7 +535,7 @@ vfloat16m2_t test_vlse16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlse16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_f16m8_mu( @@ -544,7 +544,7 @@ vfloat16m4_t test_vlse16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlse16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m8_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_f16m8_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_mu( @@ -553,7 +553,7 @@ vfloat16m8_t test_vlse16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlse16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16mf4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16mf4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlse16_v_i16mf2_mu( @@ -562,7 +562,7 @@ vint16mf4_t test_vlse16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlse16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16mf2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16mf2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m1_mu( @@ -571,7 +571,7 @@ vint16mf2_t test_vlse16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlse16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m1_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m1_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m2_mu( @@ -580,7 +580,7 @@ vint16m1_t test_vlse16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlse16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m4_mu( @@ -589,7 +589,7 @@ vint16m2_t test_vlse16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlse16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m8_mu( @@ -598,7 +598,7 @@ vint16m4_t test_vlse16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlse16_v_i16m8_mu(vbool2_t mask, 
vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_i16m8_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_i16m8_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_mu( @@ -607,7 +607,7 @@ vint16m8_t test_vlse16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlse16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16mf4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16mf4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_mu( @@ -616,7 +616,7 @@ vuint16mf4_t test_vlse16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlse16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16mf2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16mf2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m1_mu( @@ -625,7 +625,7 @@ vuint16mf2_t test_vlse16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlse16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m1_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m1_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m2_mu( @@ -634,7 +634,7 @@ vuint16m1_t test_vlse16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlse16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m2_mu(mask, maskedoff, 
base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m4_mu( @@ -643,7 +643,7 @@ vuint16m2_t test_vlse16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlse16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m8_mu( @@ -652,6 +652,6 @@ vuint16m4_t test_vlse16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlse16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_u16m8_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse16_v_u16m8_mu(mask, maskedoff, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse32.c index e3d59fa60c29..5671f3f9efa0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlse32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32mf2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32mf2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m1_tu( @@ -22,7 +22,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlse32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m1_tu(maskedoff, base, bstride, vl); + return 
__riscv_vlse32_v_f32m1_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m2_tu( @@ -31,7 +31,7 @@ vfloat32m1_t test_vlse32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, p // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlse32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m4_tu( @@ -40,7 +40,7 @@ vfloat32m2_t test_vlse32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, p // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlse32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m8_tu( @@ -49,7 +49,7 @@ vfloat32m4_t test_vlse32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, p // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlse32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m8_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m8_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_tu( @@ -58,7 +58,7 @@ vfloat32m8_t test_vlse32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, p // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlse32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32mf2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32mf2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m1_tu( @@ -67,7 +67,7 @@ vint32mf2_t test_vlse32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlse32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, 
ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m1_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m1_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m2_tu( @@ -76,7 +76,7 @@ vint32m1_t test_vlse32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, ptr // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlse32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m4_tu( @@ -85,7 +85,7 @@ vint32m2_t test_vlse32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, ptr // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlse32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m8_tu( @@ -94,7 +94,7 @@ vint32m4_t test_vlse32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, ptr // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlse32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m8_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m8_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_tu( @@ -103,7 +103,7 @@ vint32m8_t test_vlse32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, ptr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlse32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32mf2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32mf2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m1_tu( @@ -112,7 +112,7 @@ vuint32mf2_t test_vlse32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *bas // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint32m1_t test_vlse32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m1_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m1_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m2_tu( @@ -121,7 +121,7 @@ vuint32m1_t test_vlse32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlse32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m4_tu( @@ -130,7 +130,7 @@ vuint32m2_t test_vlse32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlse32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m8_tu( @@ -139,7 +139,7 @@ vuint32m4_t test_vlse32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlse32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m8_tu(maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m8_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_tum( @@ -148,7 +148,7 @@ vuint32m8_t test_vlse32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlse32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32mf2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32mf2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlse32_v_f32m1_tum( @@ -157,7 +157,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlse32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m1_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m1_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m2_tum( @@ -166,7 +166,7 @@ vfloat32m1_t test_vlse32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlse32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m4_tum( @@ -175,7 +175,7 @@ vfloat32m2_t test_vlse32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlse32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m8_tum( @@ -184,7 +184,7 @@ vfloat32m4_t test_vlse32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlse32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m8_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m8_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_tum( @@ -193,7 +193,7 @@ vfloat32m8_t test_vlse32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vlse32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32mf2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32mf2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m1_tum( @@ -202,7 +202,7 @@ vint32mf2_t test_vlse32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlse32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m1_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m1_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m2_tum( @@ -211,7 +211,7 @@ vint32m1_t test_vlse32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlse32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m4_tum( @@ -220,7 +220,7 @@ vint32m2_t test_vlse32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlse32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m8_tum( @@ -229,7 +229,7 @@ vint32m4_t test_vlse32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlse32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m8_tum(mask, maskedoff, base, bstride, vl); + return 
__riscv_vlse32_v_i32m8_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_tum( @@ -238,7 +238,7 @@ vint32m8_t test_vlse32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlse32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32mf2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32mf2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m1_tum( @@ -247,7 +247,7 @@ vuint32mf2_t test_vlse32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlse32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m1_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m1_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m2_tum( @@ -256,7 +256,7 @@ vuint32m1_t test_vlse32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlse32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m4_tum( @@ -265,7 +265,7 @@ vuint32m2_t test_vlse32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlse32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m8_tum( @@ -274,7 +274,7 @@ vuint32m4_t test_vlse32_v_u32m4_tum(vbool8_t 
mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlse32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m8_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m8_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_tumu( @@ -283,7 +283,7 @@ vuint32m8_t test_vlse32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlse32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32mf2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32mf2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m1_tumu( @@ -292,7 +292,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlse32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m1_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m1_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m2_tumu( @@ -301,7 +301,7 @@ vfloat32m1_t test_vlse32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlse32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m4_tumu( @@ -310,7 +310,7 @@ vfloat32m2_t test_vlse32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlse32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, 
ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m8_tumu( @@ -319,7 +319,7 @@ vfloat32m4_t test_vlse32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlse32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m8_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m8_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_tumu( @@ -328,7 +328,7 @@ vfloat32m8_t test_vlse32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlse32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32mf2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32mf2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m1_tumu( @@ -337,7 +337,7 @@ vint32mf2_t test_vlse32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlse32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m1_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m1_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m2_tumu( @@ -346,7 +346,7 @@ vint32m1_t test_vlse32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlse32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m2_tumu(mask, maskedoff, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlse32_v_i32m4_tumu( @@ -355,7 +355,7 @@ vint32m2_t test_vlse32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlse32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m8_tumu( @@ -364,7 +364,7 @@ vint32m4_t test_vlse32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlse32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m8_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m8_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_tumu( @@ -373,7 +373,7 @@ vint32m8_t test_vlse32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlse32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32mf2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32mf2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m1_tumu( @@ -382,7 +382,7 @@ vuint32mf2_t test_vlse32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlse32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m1_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m1_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m2_tumu( @@ -391,7 +391,7 @@ vuint32m1_t test_vlse32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m2_t test_vlse32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m4_tumu( @@ -400,7 +400,7 @@ vuint32m2_t test_vlse32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlse32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m8_tumu( @@ -409,7 +409,7 @@ vuint32m4_t test_vlse32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlse32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m8_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m8_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_mu( @@ -418,7 +418,7 @@ vuint32m8_t test_vlse32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlse32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32mf2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32mf2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m1_mu( @@ -427,7 +427,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlse32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m1_mu(mask, 
maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m1_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m2_mu( @@ -436,7 +436,7 @@ vfloat32m1_t test_vlse32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlse32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m4_mu( @@ -445,7 +445,7 @@ vfloat32m2_t test_vlse32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlse32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m8_mu( @@ -454,7 +454,7 @@ vfloat32m4_t test_vlse32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlse32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_f32m8_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_f32m8_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_mu( @@ -463,7 +463,7 @@ vfloat32m8_t test_vlse32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlse32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32mf2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32mf2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m1_mu( @@ -472,7 +472,7 @@ vint32mf2_t 
test_vlse32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlse32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m1_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m1_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m2_mu( @@ -481,7 +481,7 @@ vint32m1_t test_vlse32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlse32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m4_mu( @@ -490,7 +490,7 @@ vint32m2_t test_vlse32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlse32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m8_mu( @@ -499,7 +499,7 @@ vint32m4_t test_vlse32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlse32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_i32m8_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_i32m8_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_mu( @@ -508,7 +508,7 @@ vint32m8_t test_vlse32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlse32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t 
bstride, size_t vl) { - return vlse32_v_u32mf2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32mf2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m1_mu( @@ -517,7 +517,7 @@ vuint32mf2_t test_vlse32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlse32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m1_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m1_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m2_mu( @@ -526,7 +526,7 @@ vuint32m1_t test_vlse32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlse32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m4_mu( @@ -535,7 +535,7 @@ vuint32m2_t test_vlse32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlse32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m8_mu( @@ -544,6 +544,6 @@ vuint32m4_t test_vlse32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlse32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32_v_u32m8_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse32_v_u32m8_mu(mask, maskedoff, base, bstride, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse64.c index dc5b5bec137e..6e09776f860c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlse64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m1_tu(maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m1_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m2_tu( @@ -22,7 +22,7 @@ vfloat64m1_t test_vlse64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlse64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m4_tu( @@ -31,7 +31,7 @@ vfloat64m2_t test_vlse64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlse64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m8_tu( @@ -40,7 +40,7 @@ vfloat64m4_t test_vlse64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlse64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m8_tu(maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m8_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m1_tu( @@ -49,7 
+49,7 @@ vfloat64m8_t test_vlse64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlse64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m1_tu(maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m1_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m2_tu( @@ -58,7 +58,7 @@ vint64m1_t test_vlse64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, ptr // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlse64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m4_tu( @@ -67,7 +67,7 @@ vint64m2_t test_vlse64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, ptr // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlse64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m8_tu( @@ -76,7 +76,7 @@ vint64m4_t test_vlse64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, ptr // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlse64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m8_tu(maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m8_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m1_tu( @@ -85,7 +85,7 @@ vint64m8_t test_vlse64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, ptr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlse64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m1_tu(maskedoff, base, bstride, vl); + return 
__riscv_vlse64_v_u64m1_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m2_tu( @@ -94,7 +94,7 @@ vuint64m1_t test_vlse64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlse64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m4_tu( @@ -103,7 +103,7 @@ vuint64m2_t test_vlse64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlse64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m8_tu( @@ -112,7 +112,7 @@ vuint64m4_t test_vlse64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlse64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m8_tu(maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m8_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m1_tum( @@ -121,7 +121,7 @@ vuint64m8_t test_vlse64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlse64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m1_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m1_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m2_tum( @@ -130,7 +130,7 @@ vfloat64m1_t test_vlse64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlse64_v_f64m2_tum(vbool32_t 
mask, vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m4_tum( @@ -139,7 +139,7 @@ vfloat64m2_t test_vlse64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlse64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m8_tum( @@ -148,7 +148,7 @@ vfloat64m4_t test_vlse64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlse64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m8_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m8_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m1_tum( @@ -157,7 +157,7 @@ vfloat64m8_t test_vlse64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlse64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m1_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m1_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m2_tum( @@ -166,7 +166,7 @@ vint64m1_t test_vlse64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlse64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m2_tum(mask, 
maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m4_tum( @@ -175,7 +175,7 @@ vint64m2_t test_vlse64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlse64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m8_tum( @@ -184,7 +184,7 @@ vint64m4_t test_vlse64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlse64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m8_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m8_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m1_tum( @@ -193,7 +193,7 @@ vint64m8_t test_vlse64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlse64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m1_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m1_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m2_tum( @@ -202,7 +202,7 @@ vuint64m1_t test_vlse64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlse64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m4_tum( @@ -211,7 +211,7 @@ vuint64m2_t test_vlse64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlse64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m8_tum( @@ -220,7 +220,7 @@ vuint64m4_t test_vlse64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlse64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m8_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m8_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m1_tumu( @@ -229,7 +229,7 @@ vuint64m8_t test_vlse64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlse64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m1_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m1_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m2_tumu( @@ -238,7 +238,7 @@ vfloat64m1_t test_vlse64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlse64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m4_tumu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vlse64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlse64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return 
vlse64_v_f64m4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m8_tumu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vlse64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlse64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m8_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m8_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m1_tumu( @@ -265,7 +265,7 @@ vfloat64m8_t test_vlse64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlse64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m1_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m1_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m2_tumu( @@ -274,7 +274,7 @@ vint64m1_t test_vlse64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlse64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m4_tumu( @@ -283,7 +283,7 @@ vint64m2_t test_vlse64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlse64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m8_tumu( @@ 
-292,7 +292,7 @@ vint64m4_t test_vlse64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlse64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m8_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m8_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m1_tumu( @@ -301,7 +301,7 @@ vint64m8_t test_vlse64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlse64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m1_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m1_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m2_tumu( @@ -310,7 +310,7 @@ vuint64m1_t test_vlse64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlse64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m4_tumu( @@ -319,7 +319,7 @@ vuint64m2_t test_vlse64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlse64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m8_tumu( @@ -328,7 +328,7 @@ vuint64m4_t test_vlse64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vlse64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m8_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m8_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m1_mu( @@ -337,7 +337,7 @@ vuint64m8_t test_vlse64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlse64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m1_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m1_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m2_mu( @@ -346,7 +346,7 @@ vfloat64m1_t test_vlse64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlse64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m4_mu( @@ -355,7 +355,7 @@ vfloat64m2_t test_vlse64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlse64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_f64m4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m8_mu( @@ -364,7 +364,7 @@ vfloat64m4_t test_vlse64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlse64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_f64m8_mu(mask, maskedoff, base, bstride, vl); + return 
__riscv_vlse64_v_f64m8_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m1_mu( @@ -373,7 +373,7 @@ vfloat64m8_t test_vlse64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlse64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m1_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m1_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m2_mu( @@ -382,7 +382,7 @@ vint64m1_t test_vlse64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlse64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m4_mu( @@ -391,7 +391,7 @@ vint64m2_t test_vlse64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlse64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m8_mu( @@ -400,7 +400,7 @@ vint64m4_t test_vlse64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlse64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_i64m8_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_i64m8_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m1_mu( @@ -409,7 +409,7 @@ vint64m8_t test_vlse64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlse64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m1_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m1_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m2_mu( @@ -418,7 +418,7 @@ vuint64m1_t test_vlse64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlse64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m4_mu( @@ -427,7 +427,7 @@ vuint64m2_t test_vlse64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlse64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m8_mu( @@ -436,6 +436,6 @@ vuint64m4_t test_vlse64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlse64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64_v_u64m8_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse64_v_u64m8_mu(mask, maskedoff, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse8.c index c89e6f876958..974fb444f867 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse8.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlse8.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlse8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf8_tu(maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8mf8_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vlse8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, ptrdi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlse8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8mf4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_tu( @@ -30,7 +30,7 @@ vint8mf4_t test_vlse8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, ptrdi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlse8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8mf2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m1_tu( @@ -39,7 +39,7 @@ vint8mf2_t test_vlse8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, ptrdi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlse8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m1_tu(maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m1_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m2_tu( @@ -48,7 +48,7 @@ vint8m1_t test_vlse8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlse8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m2_tu(maskedoff, base, bstride, 
vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m4_tu( @@ -57,7 +57,7 @@ vint8m2_t test_vlse8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlse8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m8_tu( @@ -66,7 +66,7 @@ vint8m4_t test_vlse8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlse8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m8_tu(maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m8_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_tu( @@ -75,7 +75,7 @@ vint8m8_t test_vlse8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, ptrdiff_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlse8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf8_tu(maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8mf8_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_tu( @@ -84,7 +84,7 @@ vuint8mf8_t test_vlse8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, pt // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlse8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8mf4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_tu( @@ -93,7 +93,7 @@ vuint8mf4_t test_vlse8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, pt // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlse8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf2_tu(maskedoff, base, bstride, vl); + 
return __riscv_vlse8_v_u8mf2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m1_tu( @@ -102,7 +102,7 @@ vuint8mf2_t test_vlse8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, pt // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlse8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m1_tu(maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m1_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m2_tu( @@ -111,7 +111,7 @@ vuint8m1_t test_vlse8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, ptrdi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlse8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m2_tu(maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m2_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m4_tu( @@ -120,7 +120,7 @@ vuint8m2_t test_vlse8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, ptrdi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlse8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m4_tu(maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m4_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m8_tu( @@ -129,7 +129,7 @@ vuint8m4_t test_vlse8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, ptrdi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlse8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m8_tu(maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m8_tu(maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_tum( @@ -138,7 +138,7 @@ vuint8m8_t test_vlse8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, ptrdi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlse8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, 
size_t vl) { - return vlse8_v_i8mf8_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8mf8_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_tum( @@ -147,7 +147,7 @@ vint8mf8_t test_vlse8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlse8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8mf4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_tum( @@ -156,7 +156,7 @@ vint8mf4_t test_vlse8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlse8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8mf2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m1_tum( @@ -165,7 +165,7 @@ vint8mf2_t test_vlse8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlse8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m1_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m1_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m2_tum( @@ -174,7 +174,7 @@ vint8m1_t test_vlse8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlse8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m4_tum( @@ -183,7 +183,7 @@ vint8m2_t 
test_vlse8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlse8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m8_tum( @@ -192,7 +192,7 @@ vint8m4_t test_vlse8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlse8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m8_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m8_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_tum( @@ -201,7 +201,7 @@ vint8m8_t test_vlse8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlse8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf8_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8mf8_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_tum( @@ -210,7 +210,7 @@ vuint8mf8_t test_vlse8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlse8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8mf4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_tum( @@ -219,7 +219,7 @@ vuint8mf4_t test_vlse8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlse8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t 
vl) { - return vlse8_v_u8mf2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8mf2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m1_tum( @@ -228,7 +228,7 @@ vuint8mf2_t test_vlse8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlse8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m1_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m1_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m2_tum( @@ -237,7 +237,7 @@ vuint8m1_t test_vlse8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlse8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m2_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m2_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m4_tum( @@ -246,7 +246,7 @@ vuint8m2_t test_vlse8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlse8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m4_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m4_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m8_tum( @@ -255,7 +255,7 @@ vuint8m4_t test_vlse8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlse8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m8_tum(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m8_tum(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_tumu( @@ -264,7 +264,7 @@ vuint8m8_t 
test_vlse8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlse8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf8_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8mf8_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_tumu( @@ -273,7 +273,7 @@ vint8mf8_t test_vlse8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlse8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8mf4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_tumu( @@ -282,7 +282,7 @@ vint8mf4_t test_vlse8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlse8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8mf2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m1_tumu( @@ -291,7 +291,7 @@ vint8mf2_t test_vlse8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlse8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m1_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m1_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m2_tumu( @@ -300,7 +300,7 @@ vint8m1_t test_vlse8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlse8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t 
bstride, size_t vl) { - return vlse8_v_i8m2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m4_tumu( @@ -309,7 +309,7 @@ vint8m2_t test_vlse8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlse8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m8_tumu( @@ -318,7 +318,7 @@ vint8m4_t test_vlse8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlse8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m8_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m8_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_tumu( @@ -327,7 +327,7 @@ vint8m8_t test_vlse8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlse8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf8_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8mf8_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_tumu( @@ -336,7 +336,7 @@ vuint8mf8_t test_vlse8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlse8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8mf4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_tumu( @@ 
-345,7 +345,7 @@ vuint8mf4_t test_vlse8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlse8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8mf2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m1_tumu( @@ -354,7 +354,7 @@ vuint8mf2_t test_vlse8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlse8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m1_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m1_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m2_tumu( @@ -363,7 +363,7 @@ vuint8m1_t test_vlse8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlse8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m2_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m2_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m4_tumu( @@ -372,7 +372,7 @@ vuint8m2_t test_vlse8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlse8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m4_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m4_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m8_tumu( @@ -381,7 +381,7 @@ vuint8m4_t test_vlse8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlse8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const 
uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m8_tumu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m8_tumu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_mu( @@ -390,7 +390,7 @@ vuint8m8_t test_vlse8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlse8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf8_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8mf8_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_mu( @@ -399,7 +399,7 @@ vint8mf8_t test_vlse8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlse8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8mf4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_mu( @@ -408,7 +408,7 @@ vint8mf4_t test_vlse8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlse8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8mf2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8mf2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m1_mu( @@ -417,7 +417,7 @@ vint8mf2_t test_vlse8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlse8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m1_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m1_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m2_mu( @@ -426,7 
+426,7 @@ vint8m1_t test_vlse8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlse8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m4_mu( @@ -435,7 +435,7 @@ vint8m2_t test_vlse8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlse8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m8_mu( @@ -444,7 +444,7 @@ vint8m4_t test_vlse8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlse8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_i8m8_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_i8m8_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_mu( @@ -453,7 +453,7 @@ vint8m8_t test_vlse8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlse8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf8_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8mf8_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_mu( @@ -462,7 +462,7 @@ vuint8mf8_t test_vlse8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlse8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - 
return vlse8_v_u8mf4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8mf4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_mu( @@ -471,7 +471,7 @@ vuint8mf4_t test_vlse8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlse8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8mf2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8mf2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m1_mu( @@ -480,7 +480,7 @@ vuint8mf2_t test_vlse8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlse8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m1_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m1_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m2_mu( @@ -489,7 +489,7 @@ vuint8m1_t test_vlse8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlse8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m2_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m2_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m4_mu( @@ -498,7 +498,7 @@ vuint8m2_t test_vlse8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlse8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m4_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m4_mu(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m8_mu( @@ -507,6 +507,6 @@ vuint8m4_t test_vlse8_v_u8m4_mu(vbool2_t mask, 
vuint8m4_t maskedoff, const uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlse8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlse8_v_u8m8_mu(mask, maskedoff, base, bstride, vl); + return __riscv_vlse8_v_u8m8_mu(mask, maskedoff, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16.c index c3ec6f0b2ead..a97de2450aed 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_tu( @@ -30,7 +30,7 @@ void test_vlseg2e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_tu( @@ -43,7 +43,7 @@ void test_vlseg2e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, 
base, vl); + return __riscv_vlseg2e16_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_tu( @@ -56,7 +56,7 @@ void test_vlseg2e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_tu( @@ -69,7 +69,7 @@ void test_vlseg2e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_tu( @@ -82,7 +82,7 @@ void test_vlseg2e16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_tu( @@ -95,7 +95,7 @@ void test_vlseg2e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16mf2_tu(v0, 
v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_tu( @@ -108,7 +108,7 @@ void test_vlseg2e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_tu( @@ -121,7 +121,7 @@ void test_vlseg2e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t masked // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_tu( @@ -134,7 +134,7 @@ void test_vlseg2e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t masked // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_tu( @@ -147,7 +147,7 @@ void test_vlseg2e16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t masked // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: 
@test_vlseg2e16_v_u16mf2_tu( @@ -160,7 +160,7 @@ void test_vlseg2e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_tu( @@ -173,7 +173,7 @@ void test_vlseg2e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_tu( @@ -186,7 +186,7 @@ void test_vlseg2e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_tu( @@ -199,7 +199,7 @@ void test_vlseg2e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_tum( @@ -212,7 +212,7 
@@ void test_vlseg2e16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_tum( @@ -225,7 +225,7 @@ void test_vlseg2e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_tum( @@ -238,7 +238,7 @@ void test_vlseg2e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_tum( @@ -251,7 +251,7 @@ void test_vlseg2e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16m2_tum(v0, v1, mask, 
maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_tum( @@ -264,7 +264,7 @@ void test_vlseg2e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_tum( @@ -277,7 +277,7 @@ void test_vlseg2e16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_tum( @@ -290,7 +290,7 @@ void test_vlseg2e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_tum( @@ -303,7 +303,7 @@ void test_vlseg2e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m1_tum(v0, v1, mask, maskedoff0, 
maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_tum( @@ -316,7 +316,7 @@ void test_vlseg2e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_tum( @@ -329,7 +329,7 @@ void test_vlseg2e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_tum( @@ -342,7 +342,7 @@ void test_vlseg2e16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_tum( @@ -355,7 +355,7 @@ void test_vlseg2e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t 
*base, size_t vl) { - return vlseg2e16_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_tum( @@ -368,7 +368,7 @@ void test_vlseg2e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_tum( @@ -381,7 +381,7 @@ void test_vlseg2e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_tum( @@ -394,7 +394,7 @@ void test_vlseg2e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_tumu( @@ -407,7 +407,7 @@ void test_vlseg2e16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t 
*v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_tumu( @@ -420,7 +420,7 @@ void test_vlseg2e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_tumu( @@ -433,7 +433,7 @@ void test_vlseg2e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_tumu( @@ -446,7 +446,7 @@ void test_vlseg2e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_tumu( @@ -459,7 +459,7 @@ void test_vlseg2e16_v_f16m2_tumu(vfloat16m2_t *v0, 
vfloat16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_tumu( @@ -472,7 +472,7 @@ void test_vlseg2e16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_tumu( @@ -485,7 +485,7 @@ void test_vlseg2e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_tumu( @@ -498,7 +498,7 @@ void test_vlseg2e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: 
@test_vlseg2e16_v_i16m2_tumu( @@ -511,7 +511,7 @@ void test_vlseg2e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_tumu( @@ -524,7 +524,7 @@ void test_vlseg2e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_tumu( @@ -537,7 +537,7 @@ void test_vlseg2e16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_tumu( @@ -550,7 +550,7 @@ void test_vlseg2e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return 
__riscv_vlseg2e16_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_tumu( @@ -563,7 +563,7 @@ void test_vlseg2e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_tumu( @@ -576,7 +576,7 @@ void test_vlseg2e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_tumu( @@ -589,7 +589,7 @@ void test_vlseg2e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_mu( @@ -602,7 +602,7 @@ void test_vlseg2e16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) 
{ - return vlseg2e16_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_mu( @@ -615,7 +615,7 @@ void test_vlseg2e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_mu( @@ -628,7 +628,7 @@ void test_vlseg2e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_mu( @@ -641,7 +641,7 @@ void test_vlseg2e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_mu( @@ -654,7 +654,7 @@ void test_vlseg2e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, 
vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_mu( @@ -667,7 +667,7 @@ void test_vlseg2e16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_mu( @@ -680,7 +680,7 @@ void test_vlseg2e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_mu( @@ -693,7 +693,7 @@ void test_vlseg2e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_mu( @@ -706,7 +706,7 @@ void test_vlseg2e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, v // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_mu( @@ -719,7 +719,7 @@ void test_vlseg2e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_mu( @@ -732,7 +732,7 @@ void test_vlseg2e16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_mu( @@ -745,7 +745,7 @@ void test_vlseg2e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_mu( @@ -758,7 +758,7 @@ void test_vlseg2e16_v_u16mf2_mu(vuint16mf2_t *v0, 
vuint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_mu( @@ -771,7 +771,7 @@ void test_vlseg2e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_mu( @@ -784,6 +784,6 @@ void test_vlseg2e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e16_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16ff.c index 27045cabf7ee..8d1616b7710e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e16ff.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, 
vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_tu( @@ -34,7 +34,7 @@ void test_vlseg2e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_tu( @@ -49,7 +49,7 @@ void test_vlseg2e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_tu( @@ -64,7 +64,7 @@ void test_vlseg2e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_tu( @@ -79,7 +79,7 @@ void test_vlseg2e16ff_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // 
CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_tu( @@ -94,7 +94,7 @@ void test_vlseg2e16ff_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_tu( @@ -109,7 +109,7 @@ void test_vlseg2e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_tu( @@ -124,7 +124,7 @@ void test_vlseg2e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg2e16ff_v_i16m2_tu( @@ -139,7 +139,7 @@ void test_vlseg2e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_tu( @@ -154,7 +154,7 @@ void test_vlseg2e16ff_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_tu( @@ -169,7 +169,7 @@ void test_vlseg2e16ff_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_tu( @@ -184,7 +184,7 @@ void test_vlseg2e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + 
return __riscv_vlseg2e16ff_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_tu( @@ -199,7 +199,7 @@ void test_vlseg2e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_tu( @@ -214,7 +214,7 @@ void test_vlseg2e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_tu( @@ -229,7 +229,7 @@ void test_vlseg2e16ff_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_tum( @@ -244,7 +244,7 @@ void test_vlseg2e16ff_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, 
const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_tum( @@ -259,7 +259,7 @@ void test_vlseg2e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_tum( @@ -274,7 +274,7 @@ void test_vlseg2e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_tum( @@ -289,7 +289,7 @@ void test_vlseg2e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_tum( @@ -304,7 
+304,7 @@ void test_vlseg2e16ff_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_tum( @@ -319,7 +319,7 @@ void test_vlseg2e16ff_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_tum( @@ -334,7 +334,7 @@ void test_vlseg2e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_tum( @@ -349,7 +349,7 @@ void test_vlseg2e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return 
vlseg2e16ff_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_tum( @@ -364,7 +364,7 @@ void test_vlseg2e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_tum( @@ -379,7 +379,7 @@ void test_vlseg2e16ff_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_tum( @@ -394,7 +394,7 @@ void test_vlseg2e16ff_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_tum( @@ -409,7 +409,7 @@ void test_vlseg2e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, 
vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_tum( @@ -424,7 +424,7 @@ void test_vlseg2e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_tum( @@ -439,7 +439,7 @@ void test_vlseg2e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_tum( @@ -454,7 +454,7 @@ void test_vlseg2e16ff_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + 
return __riscv_vlseg2e16ff_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_tumu( @@ -469,7 +469,7 @@ void test_vlseg2e16ff_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_tumu( @@ -484,7 +484,7 @@ void test_vlseg2e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_tumu( @@ -499,7 +499,7 @@ void test_vlseg2e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3 // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_tumu( @@ -514,7 +514,7 @@ void test_vlseg2e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // 
void test_vlseg2e16ff_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_tumu( @@ -529,7 +529,7 @@ void test_vlseg2e16ff_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_tumu( @@ -544,7 +544,7 @@ void test_vlseg2e16ff_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_tumu( @@ -559,7 +559,7 @@ void test_vlseg2e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return 
__riscv_vlseg2e16ff_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_tumu( @@ -574,7 +574,7 @@ void test_vlseg2e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_tumu( @@ -589,7 +589,7 @@ void test_vlseg2e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_tumu( @@ -604,7 +604,7 @@ void test_vlseg2e16ff_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_tumu( @@ -619,7 +619,7 @@ void test_vlseg2e16ff_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf4_tumu(vuint16mf4_t 
*v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_tumu( @@ -634,7 +634,7 @@ void test_vlseg2e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_tumu( @@ -649,7 +649,7 @@ void test_vlseg2e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_tumu( @@ -664,7 +664,7 @@ void test_vlseg2e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m2_tumu(v0, v1, mask, maskedoff0, 
maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_tumu( @@ -679,7 +679,7 @@ void test_vlseg2e16ff_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_mu( @@ -694,7 +694,7 @@ void test_vlseg2e16ff_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_mu( @@ -709,7 +709,7 @@ void test_vlseg2e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_mu( @@ -724,7 +724,7 @@ void test_vlseg2e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, 
vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_mu( @@ -739,7 +739,7 @@ void test_vlseg2e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_mu( @@ -754,7 +754,7 @@ void test_vlseg2e16ff_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_mu( @@ -769,7 +769,7 @@ void test_vlseg2e16ff_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg2e16ff_v_i16mf2_mu( @@ -784,7 +784,7 @@ void test_vlseg2e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_mu( @@ -799,7 +799,7 @@ void test_vlseg2e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_mu( @@ -814,7 +814,7 @@ void test_vlseg2e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_mu( @@ -829,7 +829,7 @@ void test_vlseg2e16ff_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - 
return vlseg2e16ff_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_mu( @@ -844,7 +844,7 @@ void test_vlseg2e16ff_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_mu( @@ -859,7 +859,7 @@ void test_vlseg2e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_mu( @@ -874,7 +874,7 @@ void test_vlseg2e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_mu( @@ -889,7 +889,7 @@ void test_vlseg2e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, 
vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_mu( @@ -904,6 +904,6 @@ void test_vlseg2e16ff_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e16ff_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e16ff_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32.c index 754f216f856f..f96de9ce878c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_tu( @@ -30,7 +30,7 @@ void test_vlseg2e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, 
vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_tu( @@ -43,7 +43,7 @@ void test_vlseg2e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_tu( @@ -56,7 +56,7 @@ void test_vlseg2e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_tu( @@ -69,7 +69,7 @@ void test_vlseg2e32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_tu( @@ -82,7 +82,7 @@ void test_vlseg2e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t 
*base, size_t vl) { - return vlseg2e32_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_tu( @@ -95,7 +95,7 @@ void test_vlseg2e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t masked // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_tu( @@ -108,7 +108,7 @@ void test_vlseg2e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t masked // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tu( @@ -121,7 +121,7 @@ void test_vlseg2e32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t masked // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_tu( @@ -134,7 +134,7 @@ void test_vlseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m1_tu(v0, v1, 
maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_tu( @@ -147,7 +147,7 @@ void test_vlseg2e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_tu( @@ -160,7 +160,7 @@ void test_vlseg2e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_tum( @@ -173,7 +173,7 @@ void test_vlseg2e32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_tum( @@ -186,7 +186,7 @@ void test_vlseg2e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) { - return 
vlseg2e32_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_tum( @@ -199,7 +199,7 @@ void test_vlseg2e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_tum( @@ -212,7 +212,7 @@ void test_vlseg2e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_tum( @@ -225,7 +225,7 @@ void test_vlseg2e32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_tum( @@ -238,7 +238,7 @@ void test_vlseg2e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t 
maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_tum( @@ -251,7 +251,7 @@ void test_vlseg2e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_tum( @@ -264,7 +264,7 @@ void test_vlseg2e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tum( @@ -277,7 +277,7 @@ void test_vlseg2e32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_tum( @@ -290,7 +290,7 @@ void test_vlseg2e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_tum( @@ -303,7 +303,7 @@ void test_vlseg2e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_tum( @@ -316,7 +316,7 @@ void test_vlseg2e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_tumu( @@ -329,7 +329,7 @@ void test_vlseg2e32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_tumu( @@ -342,7 +342,7 @@ void 
test_vlseg2e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_tumu( @@ -355,7 +355,7 @@ void test_vlseg2e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_tumu( @@ -368,7 +368,7 @@ void test_vlseg2e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_tumu( @@ -381,7 +381,7 @@ void test_vlseg2e32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32mf2_tumu(v0, v1, mask, maskedoff0, 
maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_tumu( @@ -394,7 +394,7 @@ void test_vlseg2e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_tumu( @@ -407,7 +407,7 @@ void test_vlseg2e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_tumu( @@ -420,7 +420,7 @@ void test_vlseg2e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_tumu( @@ -433,7 +433,7 @@ void test_vlseg2e32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32mf2_tumu(v0, v1, mask, maskedoff0, 
maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_tumu( @@ -446,7 +446,7 @@ void test_vlseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_tumu( @@ -459,7 +459,7 @@ void test_vlseg2e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_tumu( @@ -472,7 +472,7 @@ void test_vlseg2e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_mu( @@ -485,7 +485,7 @@ void test_vlseg2e32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, 
const float *base, size_t vl) { - return vlseg2e32_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_mu( @@ -498,7 +498,7 @@ void test_vlseg2e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_mu( @@ -511,7 +511,7 @@ void test_vlseg2e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_mu( @@ -524,7 +524,7 @@ void test_vlseg2e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_mu( @@ -537,7 +537,7 @@ void test_vlseg2e32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, 
vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_mu( @@ -550,7 +550,7 @@ void test_vlseg2e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_mu( @@ -563,7 +563,7 @@ void test_vlseg2e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_mu( @@ -576,7 +576,7 @@ void test_vlseg2e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_mu( @@ -589,7 +589,7 @@ void test_vlseg2e32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vi // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_mu( @@ -602,7 +602,7 @@ void test_vlseg2e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_mu( @@ -615,7 +615,7 @@ void test_vlseg2e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_mu( @@ -628,6 +628,6 @@ void test_vlseg2e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e32_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32ff.c index 9b7957443b2c..0bb12e43e037 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e32ff.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_tu( @@ -34,7 +34,7 @@ void test_vlseg2e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_tu( @@ -49,7 +49,7 @@ void test_vlseg2e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_tu( @@ -64,7 +64,7 @@ void test_vlseg2e32ff_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e32ff_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_tu( @@ -79,7 +79,7 @@ void test_vlseg2e32ff_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_tu( @@ -94,7 +94,7 @@ void test_vlseg2e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_tu( @@ -109,7 +109,7 @@ void test_vlseg2e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_tu( @@ -124,7 +124,7 @@ void 
test_vlseg2e32ff_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tu( @@ -139,7 +139,7 @@ void test_vlseg2e32ff_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_tu( @@ -154,7 +154,7 @@ void test_vlseg2e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_tu( @@ -169,7 +169,7 @@ void test_vlseg2e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m2_tu(v0, v1, maskedoff0, 
maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_tu( @@ -184,7 +184,7 @@ void test_vlseg2e32ff_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_tum( @@ -199,7 +199,7 @@ void test_vlseg2e32ff_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_tum( @@ -214,7 +214,7 @@ void test_vlseg2e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_tum( @@ -229,7 +229,7 @@ void test_vlseg2e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t 
maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_tum( @@ -244,7 +244,7 @@ void test_vlseg2e32ff_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_tum( @@ -259,7 +259,7 @@ void test_vlseg2e32ff_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_tum( @@ -274,7 +274,7 @@ void test_vlseg2e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_tum( @@ -289,7 +289,7 @@ void 
test_vlseg2e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_tum( @@ -304,7 +304,7 @@ void test_vlseg2e32ff_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tum( @@ -319,7 +319,7 @@ void test_vlseg2e32ff_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_tum( @@ -334,7 +334,7 @@ void test_vlseg2e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m1_tum(v0, 
v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_tum( @@ -349,7 +349,7 @@ void test_vlseg2e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_tum( @@ -364,7 +364,7 @@ void test_vlseg2e32ff_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_tumu( @@ -379,7 +379,7 @@ void test_vlseg2e32ff_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_tumu( @@ -394,7 +394,7 @@ void test_vlseg2e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6 // 
CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_tumu( @@ -409,7 +409,7 @@ void test_vlseg2e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_tumu( @@ -424,7 +424,7 @@ void test_vlseg2e32ff_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_tumu( @@ -439,7 +439,7 @@ void test_vlseg2e32ff_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + 
return __riscv_vlseg2e32ff_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_tumu( @@ -454,7 +454,7 @@ void test_vlseg2e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_tumu( @@ -469,7 +469,7 @@ void test_vlseg2e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_tumu( @@ -484,7 +484,7 @@ void test_vlseg2e32ff_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tumu( @@ -499,7 +499,7 @@ void test_vlseg2e32ff_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_tumu( @@ -514,7 +514,7 @@ void test_vlseg2e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_tumu( @@ -529,7 +529,7 @@ void test_vlseg2e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_tumu( @@ -544,7 +544,7 @@ void test_vlseg2e32ff_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return 
__riscv_vlseg2e32ff_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_mu( @@ -559,7 +559,7 @@ void test_vlseg2e32ff_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_mu( @@ -574,7 +574,7 @@ void test_vlseg2e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_mu( @@ -589,7 +589,7 @@ void test_vlseg2e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_mu( @@ -604,7 +604,7 @@ void test_vlseg2e32ff_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_f32m4_mu(vfloat32m4_t *v0, 
vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_mu( @@ -619,7 +619,7 @@ void test_vlseg2e32ff_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_mu( @@ -634,7 +634,7 @@ void test_vlseg2e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_mu( @@ -649,7 +649,7 @@ void test_vlseg2e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg2e32ff_v_i32m4_mu( @@ -664,7 +664,7 @@ void test_vlseg2e32ff_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_mu( @@ -679,7 +679,7 @@ void test_vlseg2e32ff_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_mu( @@ -694,7 +694,7 @@ void test_vlseg2e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_mu( @@ -709,7 +709,7 @@ void test_vlseg2e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, 
size_t vl) { - return vlseg2e32ff_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_mu( @@ -724,6 +724,6 @@ void test_vlseg2e32ff_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e32ff_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e32ff_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64.c index aa7c9e678fb0..eac699a25d3f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_tu( @@ -30,7 +30,7 @@ void test_vlseg2e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_f64m2_tu(v0, v1, maskedoff0, 
maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_tu( @@ -43,7 +43,7 @@ void test_vlseg2e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_tu( @@ -56,7 +56,7 @@ void test_vlseg2e64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_tu( @@ -69,7 +69,7 @@ void test_vlseg2e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t masked // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_tu( @@ -82,7 +82,7 @@ void test_vlseg2e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t masked // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_tu( @@ 
-95,7 +95,7 @@ void test_vlseg2e64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t masked // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_tu( @@ -108,7 +108,7 @@ void test_vlseg2e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_tu( @@ -121,7 +121,7 @@ void test_vlseg2e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_tum( @@ -134,7 +134,7 @@ void test_vlseg2e64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_tum( @@ -147,7 +147,7 @@ void 
test_vlseg2e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_tum( @@ -160,7 +160,7 @@ void test_vlseg2e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_tum( @@ -173,7 +173,7 @@ void test_vlseg2e64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_tum( @@ -186,7 +186,7 @@ void test_vlseg2e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_tum( @@ -199,7 +199,7 @@ void test_vlseg2e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_tum( @@ -212,7 +212,7 @@ void test_vlseg2e64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_tum( @@ -225,7 +225,7 @@ void test_vlseg2e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_tum( @@ -238,7 +238,7 @@ void test_vlseg2e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return 
__riscv_vlseg2e64_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_tumu( @@ -251,7 +251,7 @@ void test_vlseg2e64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_tumu( @@ -264,7 +264,7 @@ void test_vlseg2e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_tumu( @@ -277,7 +277,7 @@ void test_vlseg2e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_tumu( @@ -290,7 +290,7 @@ void test_vlseg2e64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) { - 
return vlseg2e64_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_tumu( @@ -303,7 +303,7 @@ void test_vlseg2e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_tumu( @@ -316,7 +316,7 @@ void test_vlseg2e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_tumu( @@ -329,7 +329,7 @@ void test_vlseg2e64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_tumu( @@ -342,7 +342,7 @@ void test_vlseg2e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, 
vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_tumu( @@ -355,7 +355,7 @@ void test_vlseg2e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_mu( @@ -368,7 +368,7 @@ void test_vlseg2e64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_mu( @@ -381,7 +381,7 @@ void test_vlseg2e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_mu( @@ -394,7 +394,7 @@ void test_vlseg2e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) { - return vlseg2e64_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_mu( @@ -407,7 +407,7 @@ void test_vlseg2e64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_mu( @@ -420,7 +420,7 @@ void test_vlseg2e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_mu( @@ -433,7 +433,7 @@ void test_vlseg2e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_mu( @@ -446,7 +446,7 @@ void test_vlseg2e64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t 
mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_mu( @@ -459,7 +459,7 @@ void test_vlseg2e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_mu( @@ -472,6 +472,6 @@ void test_vlseg2e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e64_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64ff.c index 62f08bdf19e9..d6e5fa903d5c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e64ff.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, 
const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_tu( @@ -34,7 +34,7 @@ void test_vlseg2e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_tu( @@ -49,7 +49,7 @@ void test_vlseg2e64ff_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_tu( @@ -64,7 +64,7 @@ void test_vlseg2e64ff_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_ // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_tu( @@ -79,7 +79,7 @@ void test_vlseg2e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mask // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e64ff_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_tu( @@ -94,7 +94,7 @@ void test_vlseg2e64ff_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_tu( @@ -109,7 +109,7 @@ void test_vlseg2e64ff_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_tu( @@ -124,7 +124,7 @@ void test_vlseg2e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_tu( @@ -139,7 +139,7 @@ void 
test_vlseg2e64ff_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_tum( @@ -154,7 +154,7 @@ void test_vlseg2e64ff_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_tum( @@ -169,7 +169,7 @@ void test_vlseg2e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_tum( @@ -184,7 +184,7 @@ void test_vlseg2e64ff_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m4_tum(v0, v1, mask, maskedoff0, 
maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_tum( @@ -199,7 +199,7 @@ void test_vlseg2e64ff_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_tum( @@ -214,7 +214,7 @@ void test_vlseg2e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_tum( @@ -229,7 +229,7 @@ void test_vlseg2e64ff_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_tum( @@ -244,7 +244,7 @@ void test_vlseg2e64ff_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_tum( @@ -259,7 +259,7 @@ void test_vlseg2e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_tum( @@ -274,7 +274,7 @@ void test_vlseg2e64ff_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_tumu( @@ -289,7 +289,7 @@ void test_vlseg2e64ff_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m1_tumu(v0, v1, 
mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_tumu( @@ -304,7 +304,7 @@ void test_vlseg2e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_tumu( @@ -319,7 +319,7 @@ void test_vlseg2e64ff_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_tumu( @@ -334,7 +334,7 @@ void test_vlseg2e64ff_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_tumu( @@ -349,7 +349,7 @@ void test_vlseg2e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t 
mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_tumu( @@ -364,7 +364,7 @@ void test_vlseg2e64ff_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_tumu( @@ -379,7 +379,7 @@ void test_vlseg2e64ff_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_tumu( @@ -394,7 +394,7 @@ void test_vlseg2e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg2e64ff_v_u64m4_tumu( @@ -409,7 +409,7 @@ void test_vlseg2e64ff_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_mu( @@ -424,7 +424,7 @@ void test_vlseg2e64ff_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_mu( @@ -439,7 +439,7 @@ void test_vlseg2e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_mu( @@ -454,7 +454,7 @@ void test_vlseg2e64ff_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t 
*new_vl, size_t vl) { - return vlseg2e64ff_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_mu( @@ -469,7 +469,7 @@ void test_vlseg2e64ff_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_mu( @@ -484,7 +484,7 @@ void test_vlseg2e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_mu( @@ -499,7 +499,7 @@ void test_vlseg2e64ff_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_mu( @@ -514,7 +514,7 @@ void test_vlseg2e64ff_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, 
vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_mu( @@ -529,7 +529,7 @@ void test_vlseg2e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_mu( @@ -544,6 +544,6 @@ void test_vlseg2e64ff_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e64ff_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e64ff_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8.c index 1fad72e94d40..235effa4f09e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8.c @@ -16,7 +16,7 @@ // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_tu( @@ -29,7 +29,7 @@ void test_vlseg2e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedo // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_tu( @@ -42,7 +42,7 @@ void test_vlseg2e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedo // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_tu( @@ -55,7 +55,7 @@ void test_vlseg2e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedo // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_tu( @@ -68,7 +68,7 @@ void test_vlseg2e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t 
*base, size_t vl) { - return vlseg2e8_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_tu( @@ -81,7 +81,7 @@ void test_vlseg2e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_tu( @@ -94,7 +94,7 @@ void test_vlseg2e8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_tu( @@ -107,7 +107,7 @@ void test_vlseg2e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_tu( @@ -120,7 +120,7 @@ void test_vlseg2e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + 
return __riscv_vlseg2e8_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_tu( @@ -133,7 +133,7 @@ void test_vlseg2e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_tu( @@ -146,7 +146,7 @@ void test_vlseg2e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedof // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_tu( @@ -159,7 +159,7 @@ void test_vlseg2e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedof // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_tum( @@ -172,7 +172,7 @@ void test_vlseg2e8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedof // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, 
vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_tum( @@ -185,7 +185,7 @@ void test_vlseg2e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_tum( @@ -198,7 +198,7 @@ void test_vlseg2e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_tum( @@ -211,7 +211,7 @@ void test_vlseg2e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_tum( @@ -224,7 +224,7 @@ void test_vlseg2e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8m2_tum(v0, v1, mask, maskedoff0, 
maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_tum( @@ -237,7 +237,7 @@ void test_vlseg2e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_tum( @@ -250,7 +250,7 @@ void test_vlseg2e8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_tum( @@ -263,7 +263,7 @@ void test_vlseg2e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_tum( @@ -276,7 +276,7 @@ void test_vlseg2e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return 
__riscv_vlseg2e8_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_tum( @@ -289,7 +289,7 @@ void test_vlseg2e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_tum( @@ -302,7 +302,7 @@ void test_vlseg2e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vui // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_tum( @@ -315,7 +315,7 @@ void test_vlseg2e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vui // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_tumu( @@ -328,7 +328,7 @@ void test_vlseg2e8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vui // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf8_tumu(v0, v1, mask, maskedoff0, 
maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_tumu( @@ -341,7 +341,7 @@ void test_vlseg2e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_tumu( @@ -354,7 +354,7 @@ void test_vlseg2e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_tumu( @@ -367,7 +367,7 @@ void test_vlseg2e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_tumu( @@ -380,7 +380,7 @@ void test_vlseg2e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) { - return 
vlseg2e8_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_tumu( @@ -393,7 +393,7 @@ void test_vlseg2e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_tumu( @@ -406,7 +406,7 @@ void test_vlseg2e8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_tumu( @@ -419,7 +419,7 @@ void test_vlseg2e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_tumu( @@ -432,7 +432,7 @@ void test_vlseg2e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t 
maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_tumu( @@ -445,7 +445,7 @@ void test_vlseg2e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_tumu( @@ -458,7 +458,7 @@ void test_vlseg2e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vu // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_tumu( @@ -471,7 +471,7 @@ void test_vlseg2e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vu // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_mu( @@ -484,7 +484,7 @@ void test_vlseg2e8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vu // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, 
vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_mu( @@ -497,7 +497,7 @@ void test_vlseg2e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_mu( @@ -510,7 +510,7 @@ void test_vlseg2e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_mu( @@ -523,7 +523,7 @@ void test_vlseg2e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_mu( @@ -536,7 +536,7 @@ void test_vlseg2e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t 
mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_mu( @@ -549,7 +549,7 @@ void test_vlseg2e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_mu( @@ -562,7 +562,7 @@ void test_vlseg2e8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_mu( @@ -575,7 +575,7 @@ void test_vlseg2e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_mu( @@ -588,7 +588,7 @@ void test_vlseg2e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t 
*v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_mu( @@ -601,7 +601,7 @@ void test_vlseg2e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_mu( @@ -614,7 +614,7 @@ void test_vlseg2e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuin // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_mu( @@ -627,6 +627,6 @@ void test_vlseg2e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuin // CHECK-RV64-NEXT: ret void // void test_vlseg2e8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); + return __riscv_vlseg2e8_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8ff.c index f079659e2429..04271c00b186 
100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg2e8ff.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_tu( @@ -34,7 +34,7 @@ void test_vlseg2e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maske // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_tu( @@ -49,7 +49,7 @@ void test_vlseg2e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maske // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_tu( @@ -64,7 +64,7 @@ void test_vlseg2e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maske // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m1_tu(v0, v1, maskedoff0, 
maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_tu( @@ -79,7 +79,7 @@ void test_vlseg2e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_tu( @@ -94,7 +94,7 @@ void test_vlseg2e8ff_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_tu( @@ -109,7 +109,7 @@ void test_vlseg2e8ff_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_tu( @@ -124,7 +124,7 @@ void test_vlseg2e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) 
{ - return vlseg2e8ff_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_tu( @@ -139,7 +139,7 @@ void test_vlseg2e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_tu( @@ -154,7 +154,7 @@ void test_vlseg2e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_tu( @@ -169,7 +169,7 @@ void test_vlseg2e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t masked // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_tu( @@ -184,7 +184,7 @@ void test_vlseg2e8ff_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t masked // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, 
vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_tum( @@ -199,7 +199,7 @@ void test_vlseg2e8ff_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t masked // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_tum( @@ -214,7 +214,7 @@ void test_vlseg2e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_tum( @@ -229,7 +229,7 @@ void test_vlseg2e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_tum( @@ -244,7 +244,7 @@ void 
test_vlseg2e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_tum( @@ -259,7 +259,7 @@ void test_vlseg2e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vin // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_tum( @@ -274,7 +274,7 @@ void test_vlseg2e8ff_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vin // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_tum( @@ -289,7 +289,7 @@ void test_vlseg2e8ff_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vin // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + 
return __riscv_vlseg2e8ff_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_tum( @@ -304,7 +304,7 @@ void test_vlseg2e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_tum( @@ -319,7 +319,7 @@ void test_vlseg2e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_tum( @@ -334,7 +334,7 @@ void test_vlseg2e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_tum( @@ -349,7 +349,7 @@ void test_vlseg2e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, 
vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_tum( @@ -364,7 +364,7 @@ void test_vlseg2e8ff_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_tumu( @@ -379,7 +379,7 @@ void test_vlseg2e8ff_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_tumu( @@ -394,7 +394,7 @@ void test_vlseg2e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_tumu( @@ 
-409,7 +409,7 @@ void test_vlseg2e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_tumu( @@ -424,7 +424,7 @@ void test_vlseg2e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_tumu( @@ -439,7 +439,7 @@ void test_vlseg2e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_tumu( @@ -454,7 +454,7 @@ void test_vlseg2e8ff_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m4_tumu(v0, v1, mask, maskedoff0, 
maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_tumu( @@ -469,7 +469,7 @@ void test_vlseg2e8ff_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_tumu( @@ -484,7 +484,7 @@ void test_vlseg2e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_tumu( @@ -499,7 +499,7 @@ void test_vlseg2e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_tumu( @@ -514,7 +514,7 @@ void test_vlseg2e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_tumu( @@ -529,7 +529,7 @@ void test_vlseg2e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_tumu( @@ -544,7 +544,7 @@ void test_vlseg2e8ff_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_mu( @@ -559,7 +559,7 @@ void test_vlseg2e8ff_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_mu( @@ -574,7 +574,7 @@ void test_vlseg2e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_mu( @@ -589,7 +589,7 @@ void test_vlseg2e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_mu( @@ -604,7 +604,7 @@ void test_vlseg2e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_mu( @@ -619,7 +619,7 @@ void test_vlseg2e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return 
vlseg2e8ff_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_mu( @@ -634,7 +634,7 @@ void test_vlseg2e8ff_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_mu( @@ -649,7 +649,7 @@ void test_vlseg2e8ff_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_mu( @@ -664,7 +664,7 @@ void test_vlseg2e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_mu( @@ -679,7 +679,7 @@ void test_vlseg2e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void 
test_vlseg2e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_mu( @@ -694,7 +694,7 @@ void test_vlseg2e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_mu( @@ -709,7 +709,7 @@ void test_vlseg2e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vu // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_mu( @@ -724,6 +724,6 @@ void test_vlseg2e8ff_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vu // CHECK-RV64-NEXT: ret void // void test_vlseg2e8ff_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); + return __riscv_vlseg2e8ff_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16.c index 5f5d3c8981ff..9febc1297e49 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_tu( @@ -34,7 +34,7 @@ void test_vlseg3e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_tu( @@ -49,7 +49,7 @@ void test_vlseg3e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_tu( @@ -64,7 +64,7 @@ void test_vlseg3e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_tu( @@ -79,7 +79,7 @@ void test_vlseg3e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_tu( @@ -94,7 +94,7 @@ void test_vlseg3e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_tu( @@ -109,7 +109,7 @@ void test_vlseg3e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t 
maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_tu( @@ -124,7 +124,7 @@ void test_vlseg3e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_tu( @@ -139,7 +139,7 @@ void test_vlseg3e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_tu( @@ -154,7 +154,7 @@ void test_vlseg3e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_tu( @@ -169,7 +169,7 @@ void 
test_vlseg3e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_tu( @@ -184,7 +184,7 @@ void test_vlseg3e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_tum( @@ -199,7 +199,7 @@ void test_vlseg3e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_tum( @@ -214,7 +214,7 @@ void test_vlseg3e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, 
vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_tum( @@ -229,7 +229,7 @@ void test_vlseg3e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_tum( @@ -244,7 +244,7 @@ void test_vlseg3e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_tum( @@ -259,7 +259,7 @@ void test_vlseg3e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16mf4_tum(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_tum( @@ -274,7 +274,7 @@ void test_vlseg3e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_tum( @@ -289,7 +289,7 @@ void test_vlseg3e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_tum( @@ -304,7 +304,7 @@ void test_vlseg3e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_tum( @@ -319,7 +319,7 @@ void test_vlseg3e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // 
void test_vlseg3e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_tum( @@ -334,7 +334,7 @@ void test_vlseg3e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_tum( @@ -349,7 +349,7 @@ void test_vlseg3e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_tum( @@ -364,7 +364,7 @@ void test_vlseg3e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { - 
return vlseg3e16_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_tumu( @@ -379,7 +379,7 @@ void test_vlseg3e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_tumu( @@ -394,7 +394,7 @@ void test_vlseg3e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_tumu( @@ -409,7 +409,7 @@ void test_vlseg3e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_tumu( @@ -424,7 +424,7 @@ void test_vlseg3e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_tumu( @@ -439,7 +439,7 @@ void test_vlseg3e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_tumu( @@ -454,7 +454,7 @@ void test_vlseg3e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_tumu( @@ -469,7 +469,7 @@ void test_vlseg3e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void 
// void test_vlseg3e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_tumu( @@ -484,7 +484,7 @@ void test_vlseg3e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_tumu( @@ -499,7 +499,7 @@ void test_vlseg3e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_tumu( @@ -514,7 +514,7 @@ void test_vlseg3e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) { - 
return vlseg3e16_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_tumu( @@ -529,7 +529,7 @@ void test_vlseg3e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_tumu( @@ -544,7 +544,7 @@ void test_vlseg3e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_mu( @@ -559,7 +559,7 @@ void test_vlseg3e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_mu( @@ -574,7 +574,7 @@ void test_vlseg3e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_mu( @@ -589,7 +589,7 @@ void test_vlseg3e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_mu( @@ -604,7 +604,7 @@ void test_vlseg3e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_mu( @@ -619,7 +619,7 @@ void test_vlseg3e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void 
test_vlseg3e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_mu( @@ -634,7 +634,7 @@ void test_vlseg3e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_mu( @@ -649,7 +649,7 @@ void test_vlseg3e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_mu( @@ -664,7 +664,7 @@ void test_vlseg3e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16m2_mu(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_mu( @@ -679,7 +679,7 @@ void test_vlseg3e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_mu( @@ -694,7 +694,7 @@ void test_vlseg3e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_mu( @@ -709,7 +709,7 @@ void test_vlseg3e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_mu( @@ -724,6 +724,6 
@@ void test_vlseg3e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e16_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16ff.c index 587f4385cb94..a89ac22350b3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e16ff.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_tu( @@ -38,7 +38,7 @@ void test_vlseg3e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16mf2_tu(v0, v1, v2, maskedoff0, 
maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_tu( @@ -55,7 +55,7 @@ void test_vlseg3e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_tu( @@ -72,7 +72,7 @@ void test_vlseg3e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_tu( @@ -89,7 +89,7 @@ void test_vlseg3e16ff_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_tu( @@ -106,7 +106,7 @@ void test_vlseg3e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, 
vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_tu( @@ -123,7 +123,7 @@ void test_vlseg3e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_tu( @@ -140,7 +140,7 @@ void test_vlseg3e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_tu( @@ -157,7 +157,7 @@ void test_vlseg3e16ff_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const 
uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_tu( @@ -174,7 +174,7 @@ void test_vlseg3e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_tu( @@ -191,7 +191,7 @@ void test_vlseg3e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_tu( @@ -208,7 +208,7 @@ void test_vlseg3e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16m2_tu(v0, 
v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_tum( @@ -225,7 +225,7 @@ void test_vlseg3e16ff_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_tum( @@ -242,7 +242,7 @@ void test_vlseg3e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_tum( @@ -259,7 +259,7 @@ void test_vlseg3e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_tum( @@ -276,7 +276,7 @@ void test_vlseg3e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_tum( @@ -293,7 +293,7 @@ void test_vlseg3e16ff_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_tum( @@ -310,7 +310,7 @@ void test_vlseg3e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_tum( @@ -327,7 +327,7 @@ void 
test_vlseg3e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_tum( @@ -344,7 +344,7 @@ void test_vlseg3e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_tum( @@ -361,7 +361,7 @@ void test_vlseg3e16ff_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_tum( @@ -378,7 +378,7 @@ void test_vlseg3e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // 
void test_vlseg3e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_tum( @@ -395,7 +395,7 @@ void test_vlseg3e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_tum( @@ -412,7 +412,7 @@ void test_vlseg3e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_tumu( @@ -429,7 +429,7 @@ void test_vlseg3e16ff_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, 
vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_tumu( @@ -446,7 +446,7 @@ void test_vlseg3e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_tumu( @@ -463,7 +463,7 @@ void test_vlseg3e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_tumu( @@ -480,7 +480,7 @@ void test_vlseg3e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t 
maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_tumu( @@ -497,7 +497,7 @@ void test_vlseg3e16ff_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_tumu( @@ -514,7 +514,7 @@ void test_vlseg3e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_tumu( @@ -531,7 +531,7 @@ void test_vlseg3e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - 
return vlseg3e16ff_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_tumu( @@ -548,7 +548,7 @@ void test_vlseg3e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_tumu( @@ -565,7 +565,7 @@ void test_vlseg3e16ff_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_tumu( @@ -582,7 +582,7 @@ void test_vlseg3e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_tumu( @@ -599,7 +599,7 @@ void test_vlseg3e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_tumu( @@ -616,7 +616,7 @@ void test_vlseg3e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_mu( @@ -633,7 +633,7 @@ void test_vlseg3e16ff_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16mf4_mu(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_mu( @@ -650,7 +650,7 @@ void test_vlseg3e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_mu( @@ -667,7 +667,7 @@ void test_vlseg3e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_mu( @@ -684,7 +684,7 @@ void test_vlseg3e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg3e16ff_v_i16mf4_mu( @@ -701,7 +701,7 @@ void test_vlseg3e16ff_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_mu( @@ -718,7 +718,7 @@ void test_vlseg3e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_mu( @@ -735,7 +735,7 @@ void test_vlseg3e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_mu( @@ -752,7 +752,7 @@ void test_vlseg3e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_mu( @@ -769,7 +769,7 @@ void test_vlseg3e16ff_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_mu( @@ -786,7 +786,7 @@ void test_vlseg3e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_mu( @@ -803,7 +803,7 @@ void test_vlseg3e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m1_mu(vuint16m1_t *v0, 
vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_mu( @@ -820,6 +820,6 @@ void test_vlseg3e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e16ff_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e16ff_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32.c index ec6b22b9a4e0..b76e4c8c45fc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_tu( @@ -34,7 +34,7 @@ void test_vlseg3e32_v_f32mf2_tu(vfloat32mf2_t *v0, 
vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_tu( @@ -49,7 +49,7 @@ void test_vlseg3e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_tu( @@ -64,7 +64,7 @@ void test_vlseg3e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_tu( @@ -79,7 +79,7 @@ void test_vlseg3e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, 
base, vl); + return __riscv_vlseg3e32_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_tu( @@ -94,7 +94,7 @@ void test_vlseg3e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_tu( @@ -109,7 +109,7 @@ void test_vlseg3e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_tu( @@ -124,7 +124,7 @@ void test_vlseg3e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_tu( @@ -139,7 +139,7 @@ void test_vlseg3e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vlseg3e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_tum( @@ -154,7 +154,7 @@ void test_vlseg3e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_tum( @@ -169,7 +169,7 @@ void test_vlseg3e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_tum( @@ -184,7 +184,7 @@ void test_vlseg3e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32m2_tum(v0, 
v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_tum( @@ -199,7 +199,7 @@ void test_vlseg3e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_tum( @@ -214,7 +214,7 @@ void test_vlseg3e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_tum( @@ -229,7 +229,7 @@ void test_vlseg3e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_tum( @@ -244,7 +244,7 @@ 
void test_vlseg3e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_tum( @@ -259,7 +259,7 @@ void test_vlseg3e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_tum( @@ -274,7 +274,7 @@ void test_vlseg3e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_tumu( @@ -289,7 +289,7 @@ void test_vlseg3e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t 
mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_tumu( @@ -304,7 +304,7 @@ void test_vlseg3e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_tumu( @@ -319,7 +319,7 @@ void test_vlseg3e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_tumu( @@ -334,7 +334,7 @@ void test_vlseg3e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, 
base, vl); + return __riscv_vlseg3e32_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_tumu( @@ -349,7 +349,7 @@ void test_vlseg3e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_tumu( @@ -364,7 +364,7 @@ void test_vlseg3e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_tumu( @@ -379,7 +379,7 @@ void test_vlseg3e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_tumu( @@ -394,7 +394,7 @@ void 
test_vlseg3e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_tumu( @@ -409,7 +409,7 @@ void test_vlseg3e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_mu( @@ -424,7 +424,7 @@ void test_vlseg3e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_mu( @@ -439,7 +439,7 @@ void test_vlseg3e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, 
vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_mu( @@ -454,7 +454,7 @@ void test_vlseg3e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_mu( @@ -469,7 +469,7 @@ void test_vlseg3e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_mu( @@ -484,7 +484,7 @@ void test_vlseg3e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return 
__riscv_vlseg3e32_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_mu( @@ -499,7 +499,7 @@ void test_vlseg3e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_mu( @@ -514,7 +514,7 @@ void test_vlseg3e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_mu( @@ -529,7 +529,7 @@ void test_vlseg3e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_mu( @@ -544,6 +544,6 @@ void test_vlseg3e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e32_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32ff.c index 1ff1e4ecae40..2dc17a1c1787 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e32ff.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_tu( @@ -38,7 +38,7 @@ void test_vlseg3e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg3e32ff_v_f32m2_tu( @@ -55,7 +55,7 @@ void test_vlseg3e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_tu( @@ -72,7 +72,7 @@ void test_vlseg3e32ff_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_tu( @@ -89,7 +89,7 @@ void test_vlseg3e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_tu( @@ -106,7 +106,7 @@ void test_vlseg3e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m2_tu(vint32m2_t 
*v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_tu( @@ -123,7 +123,7 @@ void test_vlseg3e32ff_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_tu( @@ -140,7 +140,7 @@ void test_vlseg3e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_tu( @@ -157,7 +157,7 @@ void test_vlseg3e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m2_tu(v0, 
v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_tum( @@ -174,7 +174,7 @@ void test_vlseg3e32ff_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_tum( @@ -191,7 +191,7 @@ void test_vlseg3e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_tum( @@ -208,7 +208,7 @@ void test_vlseg3e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return 
__riscv_vlseg3e32ff_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_tum( @@ -225,7 +225,7 @@ void test_vlseg3e32ff_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_tum( @@ -242,7 +242,7 @@ void test_vlseg3e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_tum( @@ -259,7 +259,7 @@ void test_vlseg3e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_tum( @@ -276,7 +276,7 @@ void test_vlseg3e32ff_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_tum( @@ -293,7 +293,7 @@ void test_vlseg3e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_tum( @@ -310,7 +310,7 @@ void test_vlseg3e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_tumu( @@ -327,7 +327,7 @@ void 
test_vlseg3e32ff_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_tumu( @@ -344,7 +344,7 @@ void test_vlseg3e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_tumu( @@ -361,7 +361,7 @@ void test_vlseg3e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_tumu( @@ -378,7 +378,7 @@ void test_vlseg3e32ff_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m 
// CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_tumu( @@ -395,7 +395,7 @@ void test_vlseg3e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_tumu( @@ -412,7 +412,7 @@ void test_vlseg3e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_tumu( @@ -429,7 +429,7 @@ void test_vlseg3e32ff_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, 
vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_tumu( @@ -446,7 +446,7 @@ void test_vlseg3e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_tumu( @@ -463,7 +463,7 @@ void test_vlseg3e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_mu( @@ -480,7 +480,7 @@ void test_vlseg3e32ff_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, 
vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_mu( @@ -497,7 +497,7 @@ void test_vlseg3e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_mu( @@ -514,7 +514,7 @@ void test_vlseg3e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_mu( @@ -531,7 +531,7 @@ void test_vlseg3e32ff_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return 
vlseg3e32ff_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_mu( @@ -548,7 +548,7 @@ void test_vlseg3e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_mu( @@ -565,7 +565,7 @@ void test_vlseg3e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_mu( @@ -582,7 +582,7 @@ void test_vlseg3e32ff_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return 
__riscv_vlseg3e32ff_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_mu( @@ -599,7 +599,7 @@ void test_vlseg3e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_mu( @@ -616,6 +616,6 @@ void test_vlseg3e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e32ff_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e32ff_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64.c index 72754049587d..4be1ad590ddb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const 
double *base, size_t vl) { - return vlseg3e64_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_tu( @@ -34,7 +34,7 @@ void test_vlseg3e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) { - return vlseg3e64_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_tu( @@ -49,7 +49,7 @@ void test_vlseg3e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_tu( @@ -64,7 +64,7 @@ void test_vlseg3e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_tu( @@ -79,7 +79,7 @@ void test_vlseg3e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t 
*v1, vint64m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_tu( @@ -94,7 +94,7 @@ void test_vlseg3e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_tum( @@ -109,7 +109,7 @@ void test_vlseg3e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) { - return vlseg3e64_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_tum( @@ -124,7 +124,7 @@ void test_vlseg3e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) { - return 
vlseg3e64_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_tum( @@ -139,7 +139,7 @@ void test_vlseg3e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_tum( @@ -154,7 +154,7 @@ void test_vlseg3e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_tum( @@ -169,7 +169,7 @@ void test_vlseg3e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_tum( 
@@ -184,7 +184,7 @@ void test_vlseg3e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_tumu( @@ -199,7 +199,7 @@ void test_vlseg3e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) { - return vlseg3e64_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_tumu( @@ -214,7 +214,7 @@ void test_vlseg3e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) { - return vlseg3e64_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_tumu( @@ -229,7 +229,7 @@ void test_vlseg3e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t 
*v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_tumu( @@ -244,7 +244,7 @@ void test_vlseg3e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_tumu( @@ -259,7 +259,7 @@ void test_vlseg3e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_tumu( @@ -274,7 +274,7 @@ void test_vlseg3e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); 
+ return __riscv_vlseg3e64_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_mu( @@ -289,7 +289,7 @@ void test_vlseg3e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) { - return vlseg3e64_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_mu( @@ -304,7 +304,7 @@ void test_vlseg3e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) { - return vlseg3e64_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_mu( @@ -319,7 +319,7 @@ void test_vlseg3e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_mu( @@ -334,7 +334,7 @@ void test_vlseg3e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, 
vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_mu( @@ -349,7 +349,7 @@ void test_vlseg3e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_mu( @@ -364,6 +364,6 @@ void test_vlseg3e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e64_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64ff.c index 2652ba64b596..f441d5e8c9ce 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e64ff.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_tu( @@ -38,7 +38,7 @@ void test_vlseg3e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_tu( @@ -55,7 +55,7 @@ void test_vlseg3e64ff_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_tu( @@ -72,7 +72,7 @@ void test_vlseg3e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, 
vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_tu( @@ -89,7 +89,7 @@ void test_vlseg3e64ff_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_tu( @@ -106,7 +106,7 @@ void test_vlseg3e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_tum( @@ -123,7 +123,7 @@ void test_vlseg3e64ff_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m1_tum(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_tum( @@ -140,7 +140,7 @@ void test_vlseg3e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_tum( @@ -157,7 +157,7 @@ void test_vlseg3e64ff_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_tum( @@ -174,7 +174,7 @@ void test_vlseg3e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_i64m2_tum(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_tum( @@ -191,7 +191,7 @@ void test_vlseg3e64ff_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_tum( @@ -208,7 +208,7 @@ void test_vlseg3e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_tumu( @@ -225,7 +225,7 @@ void test_vlseg3e64ff_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg3e64ff_v_f64m2_tumu( @@ -242,7 +242,7 @@ void test_vlseg3e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_tumu( @@ -259,7 +259,7 @@ void test_vlseg3e64ff_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_tumu( @@ -276,7 +276,7 @@ void test_vlseg3e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_tumu( @@ -293,7 +293,7 @@ void test_vlseg3e64ff_v_i64m2_tumu(vint64m2_t *v0, 
vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_tumu( @@ -310,7 +310,7 @@ void test_vlseg3e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_mu( @@ -327,7 +327,7 @@ void test_vlseg3e64ff_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_mu( @@ -344,7 +344,7 @@ void test_vlseg3e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void 
test_vlseg3e64ff_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_mu( @@ -361,7 +361,7 @@ void test_vlseg3e64ff_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_mu( @@ -378,7 +378,7 @@ void test_vlseg3e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_mu( @@ -395,7 +395,7 @@ void test_vlseg3e64ff_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, 
vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_mu( @@ -412,6 +412,6 @@ void test_vlseg3e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e64ff_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e64ff_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8.c index f5b6d68aaa1a..9bddd8420e31 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8.c @@ -18,7 +18,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_tu( @@ -33,7 +33,7 @@ void test_vlseg3e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf4_tu(vint8mf4_t 
*v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_tu( @@ -48,7 +48,7 @@ void test_vlseg3e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_tu( @@ -63,7 +63,7 @@ void test_vlseg3e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_tu( @@ -78,7 +78,7 @@ void test_vlseg3e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_tu( @@ -93,7 
+93,7 @@ void test_vlseg3e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_tu( @@ -108,7 +108,7 @@ void test_vlseg3e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_tu( @@ -123,7 +123,7 @@ void test_vlseg3e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_tu( @@ -138,7 +138,7 @@ void test_vlseg3e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m1_tu(v0, v1, 
v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_tu( @@ -153,7 +153,7 @@ void test_vlseg3e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_tum( @@ -168,7 +168,7 @@ void test_vlseg3e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_tum( @@ -183,7 +183,7 @@ void test_vlseg3e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_tum( @@ -198,7 +198,7 @@ void test_vlseg3e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v 
// CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_tum( @@ -213,7 +213,7 @@ void test_vlseg3e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_tum( @@ -228,7 +228,7 @@ void test_vlseg3e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_tum( @@ -243,7 +243,7 @@ void test_vlseg3e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf8_tum(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_tum( @@ -258,7 +258,7 @@ void test_vlseg3e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_tum( @@ -273,7 +273,7 @@ void test_vlseg3e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_tum( @@ -288,7 +288,7 @@ void test_vlseg3e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_tum( @@ -303,7 +303,7 @@ void 
test_vlseg3e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_tumu( @@ -318,7 +318,7 @@ void test_vlseg3e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_tumu( @@ -333,7 +333,7 @@ void test_vlseg3e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_tumu( @@ -348,7 +348,7 @@ void test_vlseg3e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, 
vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_tumu( @@ -363,7 +363,7 @@ void test_vlseg3e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_tumu( @@ -378,7 +378,7 @@ void test_vlseg3e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vboo // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_tumu( @@ -393,7 +393,7 @@ void test_vlseg3e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vboo // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_tumu( @@ -408,7 +408,7 @@ void test_vlseg3e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_tumu( @@ -423,7 +423,7 @@ void test_vlseg3e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_tumu( @@ -438,7 +438,7 @@ void test_vlseg3e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_tumu( @@ -453,7 +453,7 @@ void test_vlseg3e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t 
*v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_mu( @@ -468,7 +468,7 @@ void test_vlseg3e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_mu( @@ -483,7 +483,7 @@ void test_vlseg3e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_mu( @@ -498,7 +498,7 @@ void test_vlseg3e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return 
__riscv_vlseg3e8_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_mu( @@ -513,7 +513,7 @@ void test_vlseg3e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_mu( @@ -528,7 +528,7 @@ void test_vlseg3e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8 // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_mu( @@ -543,7 +543,7 @@ void test_vlseg3e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4 // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_mu( @@ -558,7 +558,7 @@ void test_vlseg3e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vlseg3e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_mu( @@ -573,7 +573,7 @@ void test_vlseg3e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_mu( @@ -588,7 +588,7 @@ void test_vlseg3e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_mu( @@ -603,6 +603,6 @@ void test_vlseg3e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vlseg3e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, vl); + return __riscv_vlseg3e8_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8ff.c index da85e8dbbb6d..4e198b2a3ab5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg3e8ff.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_tu( @@ -38,7 +38,7 @@ void test_vlseg3e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_tu( @@ -55,7 +55,7 @@ void test_vlseg3e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf2_tu(v0, 
v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_tu( @@ -72,7 +72,7 @@ void test_vlseg3e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_tu( @@ -89,7 +89,7 @@ void test_vlseg3e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_tu( @@ -106,7 +106,7 @@ void test_vlseg3e8ff_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_tu( @@ -123,7 +123,7 @@ void 
test_vlseg3e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_tu( @@ -140,7 +140,7 @@ void test_vlseg3e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_tu( @@ -157,7 +157,7 @@ void test_vlseg3e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_tu( @@ -174,7 +174,7 @@ void test_vlseg3e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t 
maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_tum( @@ -191,7 +191,7 @@ void test_vlseg3e8ff_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_tum( @@ -208,7 +208,7 @@ void test_vlseg3e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_tum( @@ -225,7 +225,7 @@ void test_vlseg3e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_tum( @@ -242,7 +242,7 @@ void test_vlseg3e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_tum( @@ -259,7 +259,7 @@ void test_vlseg3e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_tum( @@ -276,7 +276,7 @@ void test_vlseg3e8ff_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_tum( @@ -293,7 +293,7 @@ void test_vlseg3e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_tum( @@ -310,7 +310,7 @@ void test_vlseg3e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_tum( @@ -327,7 +327,7 @@ void test_vlseg3e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_tum( @@ -344,7 +344,7 @@ void test_vlseg3e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, 
vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_tumu( @@ -361,7 +361,7 @@ void test_vlseg3e8ff_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_tumu( @@ -378,7 +378,7 @@ void test_vlseg3e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_tumu( @@ -395,7 +395,7 @@ void test_vlseg3e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, 
vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_tumu( @@ -412,7 +412,7 @@ void test_vlseg3e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_tumu( @@ -429,7 +429,7 @@ void test_vlseg3e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_tumu( @@ -446,7 +446,7 @@ void test_vlseg3e8ff_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return 
vlseg3e8ff_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_tumu( @@ -463,7 +463,7 @@ void test_vlseg3e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_tumu( @@ -480,7 +480,7 @@ void test_vlseg3e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_tumu( @@ -497,7 +497,7 @@ void test_vlseg3e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return 
__riscv_vlseg3e8ff_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_tumu( @@ -514,7 +514,7 @@ void test_vlseg3e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_mu( @@ -531,7 +531,7 @@ void test_vlseg3e8ff_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_mu( @@ -548,7 +548,7 @@ void test_vlseg3e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg3e8ff_v_i8mf2_mu( @@ -565,7 +565,7 @@ void test_vlseg3e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_mu( @@ -582,7 +582,7 @@ void test_vlseg3e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_mu( @@ -599,7 +599,7 @@ void test_vlseg3e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vboo // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_mu( @@ -616,7 +616,7 @@ void test_vlseg3e8ff_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vboo // CHECK-RV64-NEXT: ret void // void 
test_vlseg3e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_mu( @@ -633,7 +633,7 @@ void test_vlseg3e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_mu( @@ -650,7 +650,7 @@ void test_vlseg3e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_mu( @@ -667,7 +667,7 @@ void test_vlseg3e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, 
vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_mu( @@ -684,6 +684,6 @@ void test_vlseg3e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg3e8ff_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); + return __riscv_vlseg3e8ff_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16.c index 03690f9c41cf..1edb36bfcffb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_tu( @@ -38,7 +38,7 @@ void test_vlseg4e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // 
CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_tu( @@ -55,7 +55,7 @@ void test_vlseg4e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_tu( @@ -72,7 +72,7 @@ void test_vlseg4e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_tu( @@ -89,7 +89,7 @@ void test_vlseg4e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16mf4_tu(vint16mf4_t 
*v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_tu( @@ -106,7 +106,7 @@ void test_vlseg4e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_tu( @@ -123,7 +123,7 @@ void test_vlseg4e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_tu( @@ -140,7 +140,7 @@ void test_vlseg4e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t 
maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_tu( @@ -157,7 +157,7 @@ void test_vlseg4e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_tu( @@ -174,7 +174,7 @@ void test_vlseg4e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_tu( @@ -191,7 +191,7 @@ void test_vlseg4e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) { - return 
vlseg4e16_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_tu( @@ -208,7 +208,7 @@ void test_vlseg4e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_tum( @@ -225,7 +225,7 @@ void test_vlseg4e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_tum( @@ -242,7 +242,7 @@ void test_vlseg4e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) { - return 
vlseg4e16_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_tum( @@ -259,7 +259,7 @@ void test_vlseg4e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_tum( @@ -276,7 +276,7 @@ void test_vlseg4e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_tum( @@ -293,7 +293,7 @@ void test_vlseg4e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) 
{ - return vlseg4e16_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_tum( @@ -310,7 +310,7 @@ void test_vlseg4e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_tum( @@ -327,7 +327,7 @@ void test_vlseg4e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_tum( @@ -344,7 +344,7 @@ void test_vlseg4e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) { - return 
vlseg4e16_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_tum( @@ -361,7 +361,7 @@ void test_vlseg4e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_tum( @@ -378,7 +378,7 @@ void test_vlseg4e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_tum( @@ -395,7 +395,7 @@ void test_vlseg4e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t 
vl) { - return vlseg4e16_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_tum( @@ -412,7 +412,7 @@ void test_vlseg4e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_tumu( @@ -429,7 +429,7 @@ void test_vlseg4e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_tumu( @@ -446,7 +446,7 @@ void test_vlseg4e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t 
maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_tumu( @@ -463,7 +463,7 @@ void test_vlseg4e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_tumu( @@ -480,7 +480,7 @@ void test_vlseg4e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_tumu( @@ -497,7 +497,7 @@ void test_vlseg4e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t 
maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_tumu( @@ -514,7 +514,7 @@ void test_vlseg4e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_tumu( @@ -531,7 +531,7 @@ void test_vlseg4e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_tumu( @@ -548,7 +548,7 @@ void test_vlseg4e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t 
maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_tumu( @@ -565,7 +565,7 @@ void test_vlseg4e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_tumu( @@ -582,7 +582,7 @@ void test_vlseg4e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_tumu( @@ -599,7 +599,7 @@ void test_vlseg4e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, 
vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_tumu( @@ -616,7 +616,7 @@ void test_vlseg4e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_mu( @@ -633,7 +633,7 @@ void test_vlseg4e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_mu( @@ -650,7 +650,7 @@ void test_vlseg4e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, 
vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_mu( @@ -667,7 +667,7 @@ void test_vlseg4e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_mu( @@ -684,7 +684,7 @@ void test_vlseg4e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_mu( @@ -701,7 +701,7 @@ void test_vlseg4e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t 
mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_mu( @@ -718,7 +718,7 @@ void test_vlseg4e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_mu( @@ -735,7 +735,7 @@ void test_vlseg4e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_mu( @@ -752,7 +752,7 @@ void test_vlseg4e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, 
vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_mu( @@ -769,7 +769,7 @@ void test_vlseg4e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_mu( @@ -786,7 +786,7 @@ void test_vlseg4e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_mu( @@ -803,7 +803,7 @@ void test_vlseg4e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, 
vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_mu( @@ -820,6 +820,6 @@ void test_vlseg4e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e16_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16ff.c index 05f70849922c..21c24e1cfa5b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e16ff.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_tu( @@ -42,7 +42,7 @@ void test_vlseg4e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_tu( @@ -61,7 +61,7 @@ void test_vlseg4e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_tu( @@ -80,7 +80,7 @@ void test_vlseg4e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_tu( @@ -99,7 +99,7 @@ void test_vlseg4e16ff_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_tu( @@ -118,7 +118,7 @@ void test_vlseg4e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_tu( @@ -137,7 +137,7 @@ void test_vlseg4e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_tu( @@ -156,7 +156,7 @@ void test_vlseg4e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_tu( @@ -175,7 +175,7 @@ void test_vlseg4e16ff_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_tu( @@ -194,7 +194,7 @@ void test_vlseg4e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return 
__riscv_vlseg4e16ff_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_tu( @@ -213,7 +213,7 @@ void test_vlseg4e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_tu( @@ -232,7 +232,7 @@ void test_vlseg4e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_tum( @@ -251,7 +251,7 @@ void test_vlseg4e16ff_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_tum( @@ -270,7 +270,7 @@ void test_vlseg4e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_tum( @@ -289,7 +289,7 @@ void test_vlseg4e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_tum( @@ -308,7 +308,7 @@ void test_vlseg4e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t 
maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_tum( @@ -327,7 +327,7 @@ void test_vlseg4e16ff_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_tum( @@ -346,7 +346,7 @@ void test_vlseg4e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_tum( @@ -365,7 +365,7 @@ void test_vlseg4e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m1_tum(vint16m1_t *v0, 
vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_tum( @@ -384,7 +384,7 @@ void test_vlseg4e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_tum( @@ -403,7 +403,7 @@ void test_vlseg4e16ff_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_tum( @@ -422,7 +422,7 @@ void test_vlseg4e16ff_v_u16mf4_tum(vuint16mf4_t *v0, 
vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_tum( @@ -441,7 +441,7 @@ void test_vlseg4e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_tum( @@ -460,7 +460,7 @@ void test_vlseg4e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_tumu( @@ -479,7 +479,7 @@ void test_vlseg4e16ff_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_tumu( @@ -498,7 +498,7 @@ void test_vlseg4e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_tumu( @@ -517,7 +517,7 @@ void test_vlseg4e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_tumu( @@ -536,7 +536,7 @@ void test_vlseg4e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_tumu( @@ -555,7 +555,7 @@ void test_vlseg4e16ff_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_tumu( @@ -574,7 +574,7 @@ void test_vlseg4e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t 
maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_tumu( @@ -593,7 +593,7 @@ void test_vlseg4e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_tumu( @@ -612,7 +612,7 @@ void test_vlseg4e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_tumu( @@ -631,7 +631,7 @@ void test_vlseg4e16ff_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, 
vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_tumu( @@ -650,7 +650,7 @@ void test_vlseg4e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_tumu( @@ -669,7 +669,7 @@ void test_vlseg4e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_tumu( @@ -688,7 +688,7 @@ void test_vlseg4e16ff_v_u16m1_tumu(vuint16m1_t *v0, 
vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_mu( @@ -707,7 +707,7 @@ void test_vlseg4e16ff_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_mu( @@ -726,7 +726,7 @@ void test_vlseg4e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_mu( @@ -745,7 +745,7 @@ void test_vlseg4e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_mu( @@ -764,7 +764,7 @@ void test_vlseg4e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_mu( @@ -783,7 +783,7 @@ void test_vlseg4e16ff_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_mu( @@ -802,7 +802,7 @@ void test_vlseg4e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_mu( @@ -821,7 +821,7 @@ void test_vlseg4e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_mu( @@ -840,7 +840,7 @@ void test_vlseg4e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return 
vlseg4e16ff_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_mu( @@ -859,7 +859,7 @@ void test_vlseg4e16ff_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_mu( @@ -878,7 +878,7 @@ void test_vlseg4e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_mu( @@ -897,7 +897,7 @@ void test_vlseg4e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, 
vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_mu( @@ -916,6 +916,6 @@ void test_vlseg4e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e16ff_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e16ff_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32.c index d8320069299b..748a96aeb5b3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_tu( @@ -38,7 +38,7 @@ void test_vlseg4e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_tu( @@ -55,7 +55,7 @@ void test_vlseg4e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_tu( @@ -72,7 +72,7 @@ void test_vlseg4e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_tu( @@ -89,7 +89,7 @@ void 
test_vlseg4e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_tu( @@ -106,7 +106,7 @@ void test_vlseg4e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_tu( @@ -123,7 +123,7 @@ void test_vlseg4e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_tu( @@ -140,7 +140,7 @@ void test_vlseg4e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // 
void test_vlseg4e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_tu( @@ -157,7 +157,7 @@ void test_vlseg4e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_tum( @@ -174,7 +174,7 @@ void test_vlseg4e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_tum( @@ -191,7 +191,7 @@ void test_vlseg4e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32m1_tum(vfloat32m1_t *v0, 
vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_tum( @@ -208,7 +208,7 @@ void test_vlseg4e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_tum( @@ -225,7 +225,7 @@ void test_vlseg4e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_tum( @@ -242,7 +242,7 @@ void test_vlseg4e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32m1_tum(vint32m1_t 
*v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_tum( @@ -259,7 +259,7 @@ void test_vlseg4e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_tum( @@ -276,7 +276,7 @@ void test_vlseg4e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_tum( @@ -293,7 +293,7 @@ void test_vlseg4e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32m1_tum(vuint32m1_t *v0, 
vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_tum( @@ -310,7 +310,7 @@ void test_vlseg4e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_tumu( @@ -327,7 +327,7 @@ void test_vlseg4e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_tumu( @@ -344,7 +344,7 @@ void test_vlseg4e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void 
test_vlseg4e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_tumu( @@ -361,7 +361,7 @@ void test_vlseg4e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_tumu( @@ -378,7 +378,7 @@ void test_vlseg4e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_tumu( @@ -395,7 +395,7 @@ void test_vlseg4e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: 
ret void // void test_vlseg4e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_tumu( @@ -412,7 +412,7 @@ void test_vlseg4e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_tumu( @@ -429,7 +429,7 @@ void test_vlseg4e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_tumu( @@ -446,7 +446,7 @@ void test_vlseg4e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: 
ret void // void test_vlseg4e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_tumu( @@ -463,7 +463,7 @@ void test_vlseg4e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_mu( @@ -480,7 +480,7 @@ void test_vlseg4e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_mu( @@ -497,7 +497,7 @@ void test_vlseg4e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // 
CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_mu( @@ -514,7 +514,7 @@ void test_vlseg4e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_mu( @@ -531,7 +531,7 @@ void test_vlseg4e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_mu( @@ -548,7 +548,7 @@ void test_vlseg4e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // 
CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_mu( @@ -565,7 +565,7 @@ void test_vlseg4e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_mu( @@ -582,7 +582,7 @@ void test_vlseg4e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_mu( @@ -599,7 +599,7 @@ void test_vlseg4e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void 
// void test_vlseg4e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_mu( @@ -616,6 +616,6 @@ void test_vlseg4e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e32_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32ff.c index 95426e888154..80777fb7ab0a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e32ff.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + 
return __riscv_vlseg4e32ff_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_tu( @@ -42,7 +42,7 @@ void test_vlseg4e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_tu( @@ -61,7 +61,7 @@ void test_vlseg4e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_tu( @@ -80,7 +80,7 @@ void test_vlseg4e32ff_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_tu( @@ -99,7 +99,7 @@ void test_vlseg4e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_tu( @@ -118,7 +118,7 @@ void test_vlseg4e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_tu( @@ -137,7 +137,7 @@ void test_vlseg4e32ff_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_tu( @@ -156,7 +156,7 @@ void test_vlseg4e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_tu( @@ -175,7 +175,7 @@ void test_vlseg4e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_tum( @@ -194,7 +194,7 @@ void test_vlseg4e32ff_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return 
vlseg4e32ff_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_tum( @@ -213,7 +213,7 @@ void test_vlseg4e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_tum( @@ -232,7 +232,7 @@ void test_vlseg4e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_tum( @@ -251,7 +251,7 @@ void test_vlseg4e32ff_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, 
vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_tum( @@ -270,7 +270,7 @@ void test_vlseg4e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_tum( @@ -289,7 +289,7 @@ void test_vlseg4e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_tum( @@ -308,7 +308,7 @@ void test_vlseg4e32ff_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vlseg4e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_tum( @@ -327,7 +327,7 @@ void test_vlseg4e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_tum( @@ -346,7 +346,7 @@ void test_vlseg4e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_tumu( @@ -365,7 
+365,7 @@ void test_vlseg4e32ff_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_tumu( @@ -384,7 +384,7 @@ void test_vlseg4e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_tumu( @@ -403,7 +403,7 @@ void test_vlseg4e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32m2_tumu(v0, 
v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_tumu( @@ -422,7 +422,7 @@ void test_vlseg4e32ff_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_tumu( @@ -441,7 +441,7 @@ void test_vlseg4e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_tumu( @@ -460,7 +460,7 @@ void test_vlseg4e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m2_tumu(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_tumu( @@ -479,7 +479,7 @@ void test_vlseg4e32ff_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_tumu( @@ -498,7 +498,7 @@ void test_vlseg4e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_tumu( @@ -517,7 +517,7 @@ void test_vlseg4e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t 
maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_mu( @@ -536,7 +536,7 @@ void test_vlseg4e32ff_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_mu( @@ -555,7 +555,7 @@ void test_vlseg4e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_mu( @@ -574,7 +574,7 @@ void test_vlseg4e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_f32m2_mu(vfloat32m2_t *v0, 
vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_mu( @@ -593,7 +593,7 @@ void test_vlseg4e32ff_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_mu( @@ -612,7 +612,7 @@ void test_vlseg4e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_mu( @@ -631,7 +631,7 @@ void test_vlseg4e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, 
vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_mu( @@ -650,7 +650,7 @@ void test_vlseg4e32ff_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_mu( @@ -669,7 +669,7 @@ void test_vlseg4e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg4e32ff_v_u32m2_mu( @@ -688,6 +688,6 @@ void test_vlseg4e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e32ff_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e32ff_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64.c index 38b8b75f454f..2be60fb22207 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) { - return vlseg4e64_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_tu( @@ -38,7 +38,7 @@ void test_vlseg4e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double 
*base, size_t vl) { - return vlseg4e64_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_tu( @@ -55,7 +55,7 @@ void test_vlseg4e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_tu( @@ -72,7 +72,7 @@ void test_vlseg4e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_tu( @@ -89,7 +89,7 @@ void test_vlseg4e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + 
return __riscv_vlseg4e64_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_tu( @@ -106,7 +106,7 @@ void test_vlseg4e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_tum( @@ -123,7 +123,7 @@ void test_vlseg4e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) { - return vlseg4e64_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_tum( @@ -140,7 +140,7 @@ void test_vlseg4e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) { - return vlseg4e64_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return 
__riscv_vlseg4e64_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_tum( @@ -157,7 +157,7 @@ void test_vlseg4e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_tum( @@ -174,7 +174,7 @@ void test_vlseg4e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_tum( @@ -191,7 +191,7 @@ void test_vlseg4e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return 
__riscv_vlseg4e64_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_tum( @@ -208,7 +208,7 @@ void test_vlseg4e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_tumu( @@ -225,7 +225,7 @@ void test_vlseg4e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) { - return vlseg4e64_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_tumu( @@ -242,7 +242,7 @@ void test_vlseg4e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) { - return vlseg4e64_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
vl); + return __riscv_vlseg4e64_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_tumu( @@ -259,7 +259,7 @@ void test_vlseg4e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_tumu( @@ -276,7 +276,7 @@ void test_vlseg4e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_tumu( @@ -293,7 +293,7 @@ void test_vlseg4e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return 
__riscv_vlseg4e64_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_tumu( @@ -310,7 +310,7 @@ void test_vlseg4e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_mu( @@ -327,7 +327,7 @@ void test_vlseg4e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) { - return vlseg4e64_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_mu( @@ -344,7 +344,7 @@ void test_vlseg4e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) { - return vlseg4e64_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + 
return __riscv_vlseg4e64_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_mu( @@ -361,7 +361,7 @@ void test_vlseg4e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_mu( @@ -378,7 +378,7 @@ void test_vlseg4e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_mu( @@ -395,7 +395,7 @@ void test_vlseg4e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return 
__riscv_vlseg4e64_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_mu( @@ -412,6 +412,6 @@ void test_vlseg4e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e64_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64ff.c index 654ce9be0f79..13541197def9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e64ff.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_tu( @@ -42,7 +42,7 @@ void test_vlseg4e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, 
vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_tu( @@ -61,7 +61,7 @@ void test_vlseg4e64ff_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_tu( @@ -80,7 +80,7 @@ void test_vlseg4e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_tu( @@ -99,7 +99,7 @@ void test_vlseg4e64ff_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, 
vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_tu( @@ -118,7 +118,7 @@ void test_vlseg4e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_tum( @@ -137,7 +137,7 @@ void test_vlseg4e64ff_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_tum( @@ -156,7 +156,7 @@ void test_vlseg4e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void 
test_vlseg4e64ff_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_tum( @@ -175,7 +175,7 @@ void test_vlseg4e64ff_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_tum( @@ -194,7 +194,7 @@ void test_vlseg4e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_tum( @@ -213,7 +213,7 @@ void 
test_vlseg4e64ff_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_tum( @@ -232,7 +232,7 @@ void test_vlseg4e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_tumu( @@ -251,7 +251,7 @@ void test_vlseg4e64ff_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_tumu( @@ -270,7 +270,7 @@ void test_vlseg4e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_tumu( @@ -289,7 +289,7 @@ void test_vlseg4e64ff_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_tumu( @@ -308,7 +308,7 @@ void test_vlseg4e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_tumu( @@ -327,7 +327,7 @@ void test_vlseg4e64ff_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_tumu( @@ -346,7 +346,7 @@ void test_vlseg4e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_mu( @@ -365,7 +365,7 @@ void test_vlseg4e64ff_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double 
*base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_mu( @@ -384,7 +384,7 @@ void test_vlseg4e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_mu( @@ -403,7 +403,7 @@ void test_vlseg4e64ff_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_mu( @@ -422,7 +422,7 @@ void test_vlseg4e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t 
maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_mu( @@ -441,7 +441,7 @@ void test_vlseg4e64ff_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_mu( @@ -460,6 +460,6 @@ void test_vlseg4e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e64ff_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e64ff_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8.c index 
69d9e9f097df..85ef88531ea7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8.c @@ -20,7 +20,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_tu( @@ -37,7 +37,7 @@ void test_vlseg4e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_tu( @@ -54,7 +54,7 @@ void test_vlseg4e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_tu( @@ -71,7 
+71,7 @@ void test_vlseg4e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_tu( @@ -88,7 +88,7 @@ void test_vlseg4e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_tu( @@ -105,7 +105,7 @@ void test_vlseg4e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_tu( @@ -122,7 +122,7 @@ void test_vlseg4e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vlseg4e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_tu( @@ -139,7 +139,7 @@ void test_vlseg4e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_tu( @@ -156,7 +156,7 @@ void test_vlseg4e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_tu( @@ -173,7 +173,7 @@ void test_vlseg4e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t 
maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_tum( @@ -190,7 +190,7 @@ void test_vlseg4e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_tum( @@ -207,7 +207,7 @@ void test_vlseg4e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_tum( @@ -224,7 +224,7 @@ void test_vlseg4e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const 
int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_tum( @@ -241,7 +241,7 @@ void test_vlseg4e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_tum( @@ -258,7 +258,7 @@ void test_vlseg4e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_tum( @@ -275,7 +275,7 @@ void test_vlseg4e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf8_tum(v0, v1, 
v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_tum( @@ -292,7 +292,7 @@ void test_vlseg4e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_tum( @@ -309,7 +309,7 @@ void test_vlseg4e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_tum( @@ -326,7 +326,7 @@ void test_vlseg4e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_tum( @@ -343,7 +343,7 @@ void test_vlseg4e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_tumu( @@ -360,7 +360,7 @@ void test_vlseg4e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_tumu( @@ -377,7 +377,7 @@ void test_vlseg4e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + 
return __riscv_vlseg4e8_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_tumu( @@ -394,7 +394,7 @@ void test_vlseg4e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_tumu( @@ -411,7 +411,7 @@ void test_vlseg4e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_tumu( @@ -428,7 +428,7 @@ void test_vlseg4e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8m2_tumu(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_tumu( @@ -445,7 +445,7 @@ void test_vlseg4e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_tumu( @@ -462,7 +462,7 @@ void test_vlseg4e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_tumu( @@ -479,7 +479,7 @@ void test_vlseg4e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8mf2_tumu(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_tumu( @@ -496,7 +496,7 @@ void test_vlseg4e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_tumu( @@ -513,7 +513,7 @@ void test_vlseg4e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_mu( @@ -530,7 +530,7 @@ void test_vlseg4e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_mu( @@ -547,7 +547,7 @@ void test_vlseg4e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_mu( @@ -564,7 +564,7 @@ void test_vlseg4e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_mu( @@ -581,7 +581,7 @@ void test_vlseg4e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_mu( @@ -598,7 +598,7 @@ 
void test_vlseg4e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_mu( @@ -615,7 +615,7 @@ void test_vlseg4e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_mu( @@ -632,7 +632,7 @@ void test_vlseg4e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_mu( @@ -649,7 +649,7 @@ void test_vlseg4e8_v_u8mf4_mu(vuint8mf4_t *v0, 
vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_mu( @@ -666,7 +666,7 @@ void test_vlseg4e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_mu( @@ -683,6 +683,6 @@ void test_vlseg4e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vlseg4e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); + return __riscv_vlseg4e8_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8ff.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8ff.c index 3094ea1a3a77..91ed238c4227 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg4e8ff.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_tu( @@ -42,7 +42,7 @@ void test_vlseg4e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_tu( @@ -61,7 +61,7 @@ void test_vlseg4e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_tu( @@ -80,7 +80,7 @@ void test_vlseg4e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_tu( @@ -99,7 +99,7 @@ void test_vlseg4e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_tu( @@ -118,7 +118,7 @@ void test_vlseg4e8ff_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + 
return __riscv_vlseg4e8ff_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_tu( @@ -137,7 +137,7 @@ void test_vlseg4e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_tu( @@ -156,7 +156,7 @@ void test_vlseg4e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_tu( @@ -175,7 +175,7 @@ void test_vlseg4e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return 
__riscv_vlseg4e8ff_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_tu( @@ -194,7 +194,7 @@ void test_vlseg4e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_tum( @@ -213,7 +213,7 @@ void test_vlseg4e8ff_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_tum( @@ -232,7 +232,7 @@ void test_vlseg4e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_tum( @@ -251,7 +251,7 @@ void test_vlseg4e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_tum( @@ -270,7 +270,7 @@ void test_vlseg4e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_tum( @@ -289,7 +289,7 @@ void test_vlseg4e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m2_tum(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_tum( @@ -308,7 +308,7 @@ void test_vlseg4e8ff_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_tum( @@ -327,7 +327,7 @@ void test_vlseg4e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_tum( @@ -346,7 +346,7 @@ void test_vlseg4e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const 
uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_tum( @@ -365,7 +365,7 @@ void test_vlseg4e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_tum( @@ -384,7 +384,7 @@ void test_vlseg4e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_tumu( @@ -403,7 +403,7 @@ void test_vlseg4e8ff_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t 
maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_tumu( @@ -422,7 +422,7 @@ void test_vlseg4e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_tumu( @@ -441,7 +441,7 @@ void test_vlseg4e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_tumu( @@ -460,7 +460,7 @@ void test_vlseg4e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, 
vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_tumu( @@ -479,7 +479,7 @@ void test_vlseg4e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_tumu( @@ -498,7 +498,7 @@ void test_vlseg4e8ff_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_tumu( @@ -517,7 +517,7 @@ void test_vlseg4e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void 
// void test_vlseg4e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_tumu( @@ -536,7 +536,7 @@ void test_vlseg4e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_tumu( @@ -555,7 +555,7 @@ void test_vlseg4e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_tumu( @@ -574,7 +574,7 @@ void 
test_vlseg4e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_mu( @@ -593,7 +593,7 @@ void test_vlseg4e8ff_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_mu( @@ -612,7 +612,7 @@ void test_vlseg4e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_mu( @@ -631,7 +631,7 @@ void test_vlseg4e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_mu( @@ -650,7 +650,7 @@ void test_vlseg4e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_mu( @@ -669,7 +669,7 @@ void test_vlseg4e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_mu( @@ -688,7 +688,7 @@ void test_vlseg4e8ff_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_mu( @@ -707,7 +707,7 @@ void test_vlseg4e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_mu( @@ -726,7 +726,7 @@ void test_vlseg4e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return 
__riscv_vlseg4e8ff_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_mu( @@ -745,7 +745,7 @@ void test_vlseg4e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_mu( @@ -764,6 +764,6 @@ void test_vlseg4e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg4e8ff_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); + return __riscv_vlseg4e8ff_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16.c index d4387258e16f..3f1e54ae898b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16mf4_tu(vfloat16mf4_t 
*v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_tu( @@ -42,7 +42,7 @@ void test_vlseg5e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_tu( @@ -61,7 +61,7 @@ void test_vlseg5e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_tu( @@ -80,7 
+80,7 @@ void test_vlseg5e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_tu( @@ -99,7 +99,7 @@ void test_vlseg5e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_tu( @@ -118,7 +118,7 @@ void test_vlseg5e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_tu( @@ -137,7 +137,7 @@ void test_vlseg5e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_tu( @@ -156,7 +156,7 @@ void test_vlseg5e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_tu( @@ -175,7 +175,7 @@ void test_vlseg5e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16m1_tu(v0, v1, v2, v3, v4, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_tum( @@ -194,7 +194,7 @@ void test_vlseg5e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_tum( @@ -213,7 +213,7 @@ void test_vlseg5e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_tum( @@ -232,7 +232,7 @@ void test_vlseg5e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, 
vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_tum( @@ -251,7 +251,7 @@ void test_vlseg5e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_tum( @@ -270,7 +270,7 @@ void test_vlseg5e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: 
@test_vlseg5e16_v_i16m1_tum( @@ -289,7 +289,7 @@ void test_vlseg5e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_tum( @@ -308,7 +308,7 @@ void test_vlseg5e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_tum( @@ -327,7 +327,7 @@ void test_vlseg5e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_tum( @@ -346,7 +346,7 @@ void test_vlseg5e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_tumu( @@ -365,7 +365,7 @@ void test_vlseg5e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_tumu( @@ -384,7 +384,7 @@ void test_vlseg5e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, 
vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_tumu( @@ -403,7 +403,7 @@ void test_vlseg5e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_tumu( @@ -422,7 +422,7 @@ void test_vlseg5e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_tumu( @@ -441,7 +441,7 @@ void test_vlseg5e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_tumu( @@ -460,7 +460,7 @@ void test_vlseg5e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_tumu( @@ -479,7 +479,7 @@ void test_vlseg5e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf4_tumu(v0, v1, v2, 
v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_tumu( @@ -498,7 +498,7 @@ void test_vlseg5e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_tumu( @@ -517,7 +517,7 @@ void test_vlseg5e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_mu( @@ -536,7 +536,7 @@ void test_vlseg5e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, 
vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_mu( @@ -555,7 +555,7 @@ void test_vlseg5e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_mu( @@ -574,7 +574,7 @@ void test_vlseg5e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_mu( @@ -593,7 +593,7 @@ void test_vlseg5e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_mu( @@ -612,7 +612,7 @@ void test_vlseg5e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_mu( @@ -631,7 +631,7 @@ void test_vlseg5e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_mu( @@ -650,7 +650,7 @@ void test_vlseg5e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_mu( @@ -669,7 +669,7 @@ void test_vlseg5e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_mu( @@ -688,6 +688,6 @@ void test_vlseg5e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t 
mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e16_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16ff.c index f274e71db538..6a676f89c562 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e16ff.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_tu( @@ -46,7 +46,7 @@ void test_vlseg5e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return 
vlseg5e16ff_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_tu( @@ -67,7 +67,7 @@ void test_vlseg5e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_tu( @@ -88,7 +88,7 @@ void test_vlseg5e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_tu( @@ -109,7 +109,7 @@ void test_vlseg5e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf2_tu(vint16mf2_t *v0, 
vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_tu( @@ -130,7 +130,7 @@ void test_vlseg5e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_tu( @@ -151,7 +151,7 @@ void test_vlseg5e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, 
vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_tu( @@ -172,7 +172,7 @@ void test_vlseg5e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_tu( @@ -193,7 +193,7 @@ void test_vlseg5e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_tum( @@ -214,7 +214,7 @@ void test_vlseg5e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, 
size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_tum( @@ -235,7 +235,7 @@ void test_vlseg5e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_tum( @@ -256,7 +256,7 @@ void test_vlseg5e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_tum( @@ -277,7 +277,7 @@ void 
test_vlseg5e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_tum( @@ -298,7 +298,7 @@ void test_vlseg5e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_tum( @@ -319,7 +319,7 @@ void test_vlseg5e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16m1_tum(v0, 
v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_tum( @@ -340,7 +340,7 @@ void test_vlseg5e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_tum( @@ -361,7 +361,7 @@ void test_vlseg5e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_tum( @@ -382,7 +382,7 @@ void test_vlseg5e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // 
CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_tumu( @@ -403,7 +403,7 @@ void test_vlseg5e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_tumu( @@ -424,7 +424,7 @@ void test_vlseg5e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf2_tumu(v0, v1, v2, v3, v4, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_tumu( @@ -445,7 +445,7 @@ void test_vlseg5e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_tumu( @@ -466,7 +466,7 @@ void test_vlseg5e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_tumu( @@ -487,7 +487,7 @@ void test_vlseg5e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // 
void test_vlseg5e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_tumu( @@ -508,7 +508,7 @@ void test_vlseg5e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_tumu( @@ -529,7 +529,7 @@ void test_vlseg5e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, 
new_vl, vl); + return __riscv_vlseg5e16ff_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_tumu( @@ -550,7 +550,7 @@ void test_vlseg5e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_tumu( @@ -571,7 +571,7 @@ void test_vlseg5e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_mu( @@ -592,7 +592,7 @@ void test_vlseg5e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, 
vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_mu( @@ -613,7 +613,7 @@ void test_vlseg5e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_mu( @@ -634,7 +634,7 @@ void test_vlseg5e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return 
__riscv_vlseg5e16ff_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_mu( @@ -655,7 +655,7 @@ void test_vlseg5e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_mu( @@ -676,7 +676,7 @@ void test_vlseg5e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_mu( @@ -697,7 +697,7 @@ void test_vlseg5e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t 
mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_mu( @@ -718,7 +718,7 @@ void test_vlseg5e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_mu( @@ -739,7 +739,7 @@ void test_vlseg5e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_mu( @@ -760,6 +760,6 @@ void test_vlseg5e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e16ff_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32.c index 47ef593e5170..b8c636f4c2a5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_tu( @@ -42,7 +42,7 @@ void test_vlseg5e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // 
void test_vlseg5e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_tu( @@ -61,7 +61,7 @@ void test_vlseg5e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_tu( @@ -80,7 +80,7 @@ void test_vlseg5e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_tu( @@ -99,7 +99,7 @@ void 
test_vlseg5e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_tu( @@ -118,7 +118,7 @@ void test_vlseg5e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_tum( @@ -137,7 +137,7 @@ void test_vlseg5e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return 
__riscv_vlseg5e32_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_tum( @@ -156,7 +156,7 @@ void test_vlseg5e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_tum( @@ -175,7 +175,7 @@ void test_vlseg5e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_tum( @@ -194,7 +194,7 @@ void test_vlseg5e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, 
vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_tum( @@ -213,7 +213,7 @@ void test_vlseg5e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_tum( @@ -232,7 +232,7 @@ void test_vlseg5e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_tumu( @@ -251,7 +251,7 @@ void test_vlseg5e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // 
CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_tumu( @@ -270,7 +270,7 @@ void test_vlseg5e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_tumu( @@ -289,7 +289,7 @@ void test_vlseg5e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return 
__riscv_vlseg5e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_tumu( @@ -308,7 +308,7 @@ void test_vlseg5e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_tumu( @@ -327,7 +327,7 @@ void test_vlseg5e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_tumu( @@ -346,7 +346,7 @@ void test_vlseg5e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t 
maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_mu( @@ -365,7 +365,7 @@ void test_vlseg5e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_mu( @@ -384,7 +384,7 @@ void test_vlseg5e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_mu( @@ -403,7 +403,7 @@ void test_vlseg5e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, 
vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_mu( @@ -422,7 +422,7 @@ void test_vlseg5e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_mu( @@ -441,7 +441,7 @@ void test_vlseg5e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_mu( @@ -460,6 +460,6 @@ void test_vlseg5e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e32_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32ff.c index f69f920b673b..d2502c1eabd6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e32ff.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_tu( @@ -46,7 +46,7 @@ void test_vlseg5e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t 
*v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_tu( @@ -67,7 +67,7 @@ void test_vlseg5e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_tu( @@ -88,7 +88,7 @@ void test_vlseg5e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_i32m1_tu(v0, v1, 
v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_tu( @@ -109,7 +109,7 @@ void test_vlseg5e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_tu( @@ -130,7 +130,7 @@ void test_vlseg5e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_tum( @@ -151,7 +151,7 @@ void test_vlseg5e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t 
maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_tum( @@ -172,7 +172,7 @@ void test_vlseg5e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_tum( @@ -193,7 +193,7 @@ void test_vlseg5e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg5e32ff_v_i32m1_tum( @@ -214,7 +214,7 @@ void test_vlseg5e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_tum( @@ -235,7 +235,7 @@ void test_vlseg5e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_tum( @@ -256,7 +256,7 @@ void test_vlseg5e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, 
size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_tumu( @@ -277,7 +277,7 @@ void test_vlseg5e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_tumu( @@ -298,7 +298,7 @@ void test_vlseg5e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_tumu( @@ -319,7 +319,7 @@ void 
test_vlseg5e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_tumu( @@ -340,7 +340,7 @@ void test_vlseg5e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_tumu( @@ -361,7 +361,7 @@ void test_vlseg5e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return 
vlseg5e32ff_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_tumu( @@ -382,7 +382,7 @@ void test_vlseg5e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_mu( @@ -403,7 +403,7 @@ void test_vlseg5e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_mu( @@ -424,7 +424,7 @@ void test_vlseg5e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, 
vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_mu( @@ -445,7 +445,7 @@ void test_vlseg5e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_mu( @@ -466,7 +466,7 @@ void test_vlseg5e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_mu( @@ -487,7 +487,7 @@ void test_vlseg5e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_mu( @@ -508,6 +508,6 @@ void test_vlseg5e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e32ff_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64.c index 6351b3cdcd01..e52818ee0a8b 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) { - return vlseg5e64_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e64_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_tu( @@ -42,7 +42,7 @@ void test_vlseg5e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) { - return vlseg5e64_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e64_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_tu( @@ -61,7 +61,7 @@ void test_vlseg5e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { - return vlseg5e64_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e64_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_tum( @@ -80,7 +80,7 @@ void test_vlseg5e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) { - return vlseg5e64_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e64_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_tum( @@ -99,7 +99,7 @@ void test_vlseg5e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) { - return vlseg5e64_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e64_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_tum( @@ -118,7 +118,7 @@ void test_vlseg5e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, 
vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { - return vlseg5e64_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e64_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_tumu( @@ -137,7 +137,7 @@ void test_vlseg5e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) { - return vlseg5e64_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e64_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_tumu( @@ -156,7 +156,7 @@ void test_vlseg5e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) { - return vlseg5e64_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e64_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_tumu( @@ -175,7 +175,7 @@ void test_vlseg5e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t 
*v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { - return vlseg5e64_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e64_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_mu( @@ -194,7 +194,7 @@ void test_vlseg5e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) { - return vlseg5e64_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e64_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_mu( @@ -213,7 +213,7 @@ void test_vlseg5e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) { - return vlseg5e64_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e64_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_mu( @@ -232,6 +232,6 @@ void test_vlseg5e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { - return vlseg5e64_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e64_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64ff.c index 05c23ad651ce..d6de316c4f0d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e64ff.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_tu( @@ -46,7 +46,7 @@ void test_vlseg5e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // 
CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_tu( @@ -67,7 +67,7 @@ void test_vlseg5e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_tum( @@ -88,7 +88,7 @@ void test_vlseg5e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return 
__riscv_vlseg5e64ff_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_tum( @@ -109,7 +109,7 @@ void test_vlseg5e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_tum( @@ -130,7 +130,7 @@ void test_vlseg5e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_tumu( @@ -151,7 +151,7 @@ void test_vlseg5e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, 
vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_tumu( @@ -172,7 +172,7 @@ void test_vlseg5e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_tumu( @@ -193,7 +193,7 @@ void test_vlseg5e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_mu( @@ -214,7 +214,7 @@ void test_vlseg5e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_mu( @@ -235,7 +235,7 @@ void test_vlseg5e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_mu( @@ -256,6 +256,6 @@ void test_vlseg5e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, 
vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e64ff_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8.c index c8cd90cc7a9c..e74180c8ae64 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8.c @@ -22,7 +22,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_tu( @@ -41,7 +41,7 @@ void test_vlseg5e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_tu( @@ -60,7 +60,7 @@ void test_vlseg5e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_tu( @@ -79,7 +79,7 @@ void test_vlseg5e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_tu( @@ -98,7 +98,7 @@ void test_vlseg5e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8mf8_tu(v0, v1, 
v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_tu( @@ -117,7 +117,7 @@ void test_vlseg5e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_tu( @@ -136,7 +136,7 @@ void test_vlseg5e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_tu( @@ -155,7 +155,7 @@ void test_vlseg5e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_tum( @@ -174,7 +174,7 @@ void test_vlseg5e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_tum( @@ -193,7 +193,7 @@ void test_vlseg5e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_tum( @@ -212,7 +212,7 @@ void test_vlseg5e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, 
vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_tum( @@ -231,7 +231,7 @@ void test_vlseg5e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_tum( @@ -250,7 +250,7 @@ void test_vlseg5e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_tum( @@ -269,7 +269,7 @@ void test_vlseg5e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vlseg5e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_tum( @@ -288,7 +288,7 @@ void test_vlseg5e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_tum( @@ -307,7 +307,7 @@ void test_vlseg5e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_tumu( @@ -326,7 +326,7 @@ void test_vlseg5e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_tumu( @@ -345,7 +345,7 @@ void test_vlseg5e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_tumu( @@ -364,7 +364,7 @@ void test_vlseg5e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_tumu( @@ -383,7 +383,7 @@ void test_vlseg5e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_tumu( @@ -402,7 +402,7 @@ void test_vlseg5e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_tumu( @@ -421,7 +421,7 @@ void test_vlseg5e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t 
maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_tumu( @@ -440,7 +440,7 @@ void test_vlseg5e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_tumu( @@ -459,7 +459,7 @@ void test_vlseg5e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_mu( @@ -478,7 +478,7 @@ void test_vlseg5e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // 
void test_vlseg5e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_mu( @@ -497,7 +497,7 @@ void test_vlseg5e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_mu( @@ -516,7 +516,7 @@ void test_vlseg5e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: 
@test_vlseg5e8_v_i8m1_mu( @@ -535,7 +535,7 @@ void test_vlseg5e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_mu( @@ -554,7 +554,7 @@ void test_vlseg5e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_mu( @@ -573,7 +573,7 @@ void test_vlseg5e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, 
vl); + return __riscv_vlseg5e8_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_mu( @@ -592,7 +592,7 @@ void test_vlseg5e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_mu( @@ -611,6 +611,6 @@ void test_vlseg5e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); + return __riscv_vlseg5e8_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8ff.c index 19d1069d455c..0d10e1bbf0bd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8ff.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg5e8ff.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_tu( @@ -46,7 +46,7 @@ void test_vlseg5e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_tu( @@ -67,7 +67,7 @@ void test_vlseg5e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); 
+ return __riscv_vlseg5e8ff_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_tu( @@ -88,7 +88,7 @@ void test_vlseg5e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_tu( @@ -109,7 +109,7 @@ void test_vlseg5e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_tu( @@ -130,7 +130,7 @@ void test_vlseg5e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t 
maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_tu( @@ -151,7 +151,7 @@ void test_vlseg5e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_tu( @@ -172,7 +172,7 @@ void test_vlseg5e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_tum( @@ -193,7 +193,7 @@ void test_vlseg5e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vlseg5e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_tum( @@ -214,7 +214,7 @@ void test_vlseg5e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_tum( @@ -235,7 +235,7 @@ void test_vlseg5e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf2_tum(v0, v1, v2, 
v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_tum( @@ -256,7 +256,7 @@ void test_vlseg5e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_tum( @@ -277,7 +277,7 @@ void test_vlseg5e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_tum( @@ -298,7 +298,7 @@ void test_vlseg5e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t 
maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_tum( @@ -319,7 +319,7 @@ void test_vlseg5e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_tum( @@ -340,7 +340,7 @@ void test_vlseg5e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_tumu( @@ -361,7 +361,7 @@ void 
test_vlseg5e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_tumu( @@ -382,7 +382,7 @@ void test_vlseg5e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_tumu( @@ -403,7 +403,7 @@ void test_vlseg5e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_tumu( @@ -424,7 +424,7 @@ void test_vlseg5e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_tumu( @@ -445,7 +445,7 @@ void test_vlseg5e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_tumu( @@ -466,7 +466,7 @@ void test_vlseg5e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t 
*v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_tumu( @@ -487,7 +487,7 @@ void test_vlseg5e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_tumu( @@ -508,7 +508,7 @@ void test_vlseg5e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_mu( @@ -529,7 +529,7 @@ void test_vlseg5e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_mu( @@ -550,7 +550,7 @@ void test_vlseg5e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_mu( @@ -571,7 +571,7 @@ void test_vlseg5e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t 
maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_mu( @@ -592,7 +592,7 @@ void test_vlseg5e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_mu( @@ -613,7 +613,7 @@ void test_vlseg5e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_mu( @@ -634,7 +634,7 @@ void test_vlseg5e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t 
*v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_mu( @@ -655,7 +655,7 @@ void test_vlseg5e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + return __riscv_vlseg5e8ff_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_mu( @@ -676,6 +676,6 @@ void test_vlseg5e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg5e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); + 
return __riscv_vlseg5e8ff_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16.c index 705a2f032566..2b7179e6c0cf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_tu( @@ -46,7 +46,7 @@ void test_vlseg6e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_tu( @@ -67,7 +67,7 @@ void test_vlseg6e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_tu( @@ -88,7 +88,7 @@ void test_vlseg6e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_tu( @@ -109,7 +109,7 @@ void test_vlseg6e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t 
maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_tu( @@ -130,7 +130,7 @@ void test_vlseg6e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_tu( @@ -151,7 +151,7 @@ void test_vlseg6e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: 
@test_vlseg6e16_v_u16mf2_tu( @@ -172,7 +172,7 @@ void test_vlseg6e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_tu( @@ -193,7 +193,7 @@ void test_vlseg6e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_tum( @@ -214,7 +214,7 @@ void test_vlseg6e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t 
maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_tum( @@ -235,7 +235,7 @@ void test_vlseg6e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_tum( @@ -256,7 +256,7 @@ void test_vlseg6e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_tum( @@ -277,7 +277,7 @@ void test_vlseg6e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_tum( @@ -298,7 +298,7 @@ void test_vlseg6e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_tum( @@ -319,7 +319,7 @@ void test_vlseg6e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, 
vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_tum( @@ -340,7 +340,7 @@ void test_vlseg6e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_tum( @@ -361,7 +361,7 @@ void test_vlseg6e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return 
__riscv_vlseg6e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_tum( @@ -382,7 +382,7 @@ void test_vlseg6e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_tumu( @@ -403,7 +403,7 @@ void test_vlseg6e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_tumu( @@ -424,7 +424,7 @@ void test_vlseg6e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void 
test_vlseg6e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_tumu( @@ -445,7 +445,7 @@ void test_vlseg6e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_tumu( @@ -466,7 +466,7 @@ void test_vlseg6e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { - 
return vlseg6e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_tumu( @@ -487,7 +487,7 @@ void test_vlseg6e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_tumu( @@ -508,7 +508,7 @@ void test_vlseg6e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_tumu( @@ -529,7 +529,7 @@ void 
test_vlseg6e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_tumu( @@ -550,7 +550,7 @@ void test_vlseg6e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_tumu( @@ -571,7 +571,7 @@ void test_vlseg6e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, 
vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_mu( @@ -592,7 +592,7 @@ void test_vlseg6e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_mu( @@ -613,7 +613,7 @@ void test_vlseg6e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_mu( @@ -634,7 +634,7 @@ void test_vlseg6e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_mu( @@ -655,7 +655,7 @@ void test_vlseg6e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_mu( @@ -676,7 +676,7 @@ void test_vlseg6e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, 
vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_mu( @@ -697,7 +697,7 @@ void test_vlseg6e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_mu( @@ -718,7 +718,7 @@ void test_vlseg6e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return 
__riscv_vlseg6e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_mu( @@ -739,7 +739,7 @@ void test_vlseg6e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_mu( @@ -760,6 +760,6 @@ void test_vlseg6e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16ff.c index 
01124127d8f2..f5531e20e60d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e16ff.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_tu( @@ -50,7 +50,7 @@ void test_vlseg6e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_tu( @@ -73,7 +73,7 @@ void test_vlseg6e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void 
test_vlseg6e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_tu( @@ -96,7 +96,7 @@ void test_vlseg6e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_tu( @@ -119,7 +119,7 @@ void test_vlseg6e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return 
vlseg6e16ff_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_tu( @@ -142,7 +142,7 @@ void test_vlseg6e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_tu( @@ -165,7 +165,7 @@ void test_vlseg6e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_tu( @@ -188,7 
+188,7 @@ void test_vlseg6e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_tu( @@ -211,7 +211,7 @@ void test_vlseg6e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_tum( @@ -234,7 +234,7 @@ void test_vlseg6e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, 
vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_tum( @@ -257,7 +257,7 @@ void test_vlseg6e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_tum( @@ -280,7 +280,7 @@ void test_vlseg6e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_tum( @@ -303,7 +303,7 @@ void test_vlseg6e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_tum( @@ -326,7 +326,7 @@ void test_vlseg6e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_tum( @@ -349,7 +349,7 @@ 
void test_vlseg6e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_tum( @@ -372,7 +372,7 @@ void test_vlseg6e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_tum( @@ -395,7 +395,7 @@ void test_vlseg6e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t 
maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_tum( @@ -418,7 +418,7 @@ void test_vlseg6e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_tumu( @@ -441,7 +441,7 @@ void test_vlseg6e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_tumu( @@ -464,7 +464,7 @@ void test_vlseg6e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_tumu( @@ -487,7 +487,7 @@ void test_vlseg6e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_tumu( @@ -510,7 +510,7 @@ void test_vlseg6e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_tumu( @@ -533,7 +533,7 @@ void test_vlseg6e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_tumu( @@ -556,7 +556,7 @@ void test_vlseg6e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, 
vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_tumu( @@ -579,7 +579,7 @@ void test_vlseg6e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_tumu( @@ -602,7 +602,7 @@ void test_vlseg6e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return 
vlseg6e16ff_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_tumu( @@ -625,7 +625,7 @@ void test_vlseg6e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_mu( @@ -648,7 +648,7 @@ void test_vlseg6e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_mu( @@ -671,7 +671,7 @@ void test_vlseg6e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_mu( @@ -694,7 +694,7 @@ void test_vlseg6e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_mu( @@ -717,7 +717,7 @@ void test_vlseg6e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void 
test_vlseg6e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_mu( @@ -740,7 +740,7 @@ void test_vlseg6e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_mu( @@ -763,7 +763,7 @@ void test_vlseg6e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, 
size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_mu( @@ -786,7 +786,7 @@ void test_vlseg6e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_mu( @@ -809,7 +809,7 @@ void test_vlseg6e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_mu( @@ -832,6 +832,6 @@ void test_vlseg6e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e16ff_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32.c index 07f469aca7bf..224d0a239cda 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_tu( @@ -46,7 +46,7 @@ void test_vlseg6e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_tu( @@ -67,7 +67,7 @@ void test_vlseg6e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_tu( @@ -88,7 +88,7 @@ void test_vlseg6e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, 
vint32m1_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_tu( @@ -109,7 +109,7 @@ void test_vlseg6e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_tu( @@ -130,7 +130,7 @@ void test_vlseg6e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_tum( @@ -151,7 +151,7 @@ void 
test_vlseg6e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_tum( @@ -172,7 +172,7 @@ void test_vlseg6e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_tum( @@ -193,7 +193,7 @@ void test_vlseg6e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t 
maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_tum( @@ -214,7 +214,7 @@ void test_vlseg6e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_tum( @@ -235,7 +235,7 @@ void test_vlseg6e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_tum( @@ -256,7 +256,7 @@ void test_vlseg6e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_tumu( @@ -277,7 +277,7 @@ void test_vlseg6e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_tumu( @@ -298,7 +298,7 @@ void test_vlseg6e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, 
vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_tumu( @@ -319,7 +319,7 @@ void test_vlseg6e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_tumu( @@ -340,7 +340,7 @@ void test_vlseg6e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return 
__riscv_vlseg6e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_tumu( @@ -361,7 +361,7 @@ void test_vlseg6e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_tumu( @@ -382,7 +382,7 @@ void test_vlseg6e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_mu( @@ -403,7 +403,7 @@ void test_vlseg6e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void 
test_vlseg6e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_mu( @@ -424,7 +424,7 @@ void test_vlseg6e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_mu( @@ -445,7 +445,7 @@ void test_vlseg6e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) { - return 
vlseg6e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_mu( @@ -466,7 +466,7 @@ void test_vlseg6e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_mu( @@ -487,7 +487,7 @@ void test_vlseg6e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_mu( @@ -508,6 +508,6 @@ void 
test_vlseg6e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32ff.c index f8127630932b..f02db224a3b9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e32ff.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_tu( @@ -50,7 +50,7 @@ void test_vlseg6e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, 
vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_tu( @@ -73,7 +73,7 @@ void test_vlseg6e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_tu( @@ -96,7 +96,7 @@ void test_vlseg6e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t 
*new_vl, size_t vl) { - return vlseg6e32ff_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_tu( @@ -119,7 +119,7 @@ void test_vlseg6e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_tu( @@ -142,7 +142,7 @@ void test_vlseg6e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg6e32ff_v_f32mf2_tum( @@ -165,7 +165,7 @@ void test_vlseg6e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_tum( @@ -188,7 +188,7 @@ void test_vlseg6e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_tum( @@ -211,7 +211,7 @@ void test_vlseg6e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t 
*v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_tum( @@ -234,7 +234,7 @@ void test_vlseg6e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_tum( @@ -257,7 +257,7 @@ void test_vlseg6e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32mf2_tum(v0, v1, v2, 
v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_tum( @@ -280,7 +280,7 @@ void test_vlseg6e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_tumu( @@ -303,7 +303,7 @@ void test_vlseg6e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); 
} // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_tumu( @@ -326,7 +326,7 @@ void test_vlseg6e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_tumu( @@ -349,7 +349,7 @@ void test_vlseg6e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_tumu( @@ -372,7 +372,7 @@ void test_vlseg6e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, 
vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_tumu( @@ -395,7 +395,7 @@ void test_vlseg6e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_tumu( @@ -418,7 +418,7 @@ void test_vlseg6e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return 
vlseg6e32ff_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_mu( @@ -441,7 +441,7 @@ void test_vlseg6e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_mu( @@ -464,7 +464,7 @@ void test_vlseg6e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_mu( @@ -487,7 +487,7 @@ void test_vlseg6e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_mu( @@ -510,7 +510,7 @@ void test_vlseg6e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_mu( @@ -533,7 +533,7 @@ void test_vlseg6e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, 
vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_mu( @@ -556,6 +556,6 @@ void test_vlseg6e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e32ff_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64.c index 70e814f7bc33..8a143d620ee5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, 
vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) { - return vlseg6e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_tu( @@ -46,7 +46,7 @@ void test_vlseg6e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) { - return vlseg6e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_tu( @@ -67,7 +67,7 @@ void test_vlseg6e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { - return vlseg6e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_tum( @@ -88,7 +88,7 @@ void test_vlseg6e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) { - return vlseg6e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_tum( @@ -109,7 +109,7 @@ void test_vlseg6e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) { - return vlseg6e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_tum( @@ -130,7 +130,7 @@ void test_vlseg6e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, 
vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { - return vlseg6e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_tumu( @@ -151,7 +151,7 @@ void test_vlseg6e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) { - return vlseg6e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_tumu( @@ -172,7 +172,7 @@ void test_vlseg6e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) { - return vlseg6e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return 
__riscv_vlseg6e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_tumu( @@ -193,7 +193,7 @@ void test_vlseg6e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { - return vlseg6e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_mu( @@ -214,7 +214,7 @@ void test_vlseg6e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) { - return vlseg6e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_mu( @@ -235,7 +235,7 @@ void test_vlseg6e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_i64m1_mu(vint64m1_t 
*v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) { - return vlseg6e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_mu( @@ -256,6 +256,6 @@ void test_vlseg6e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { - return vlseg6e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64ff.c index 996432bc2650..01082ca81b20 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e64ff.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, 
vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_tu( @@ -50,7 +50,7 @@ void test_vlseg6e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_tu( @@ -73,7 +73,7 @@ void test_vlseg6e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return 
__riscv_vlseg6e64ff_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_tum( @@ -96,7 +96,7 @@ void test_vlseg6e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_tum( @@ -119,7 +119,7 @@ void test_vlseg6e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_tum( @@ -142,7 +142,7 @@ void test_vlseg6e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // 
CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_tumu( @@ -165,7 +165,7 @@ void test_vlseg6e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_tumu( @@ -188,7 +188,7 @@ void test_vlseg6e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t 
maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_tumu( @@ -211,7 +211,7 @@ void test_vlseg6e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_mu( @@ -234,7 +234,7 @@ void test_vlseg6e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_f64m1_mu(v0, v1, 
v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_mu( @@ -257,7 +257,7 @@ void test_vlseg6e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_mu( @@ -280,6 +280,6 @@ void test_vlseg6e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e64ff_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8.c index 
498e6b2459a3..779925ea7eda 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8.c @@ -24,7 +24,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_tu( @@ -45,7 +45,7 @@ void test_vlseg6e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_tu( @@ -66,7 +66,7 @@ void test_vlseg6e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t 
maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_tu( @@ -87,7 +87,7 @@ void test_vlseg6e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_tu( @@ -108,7 +108,7 @@ void test_vlseg6e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_tu( @@ -129,7 +129,7 @@ void test_vlseg6e8_v_u8mf8_tu(vuint8mf8_t 
*v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_tu( @@ -150,7 +150,7 @@ void test_vlseg6e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_tu( @@ -171,7 +171,7 @@ void test_vlseg6e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_tum( @@ -192,7 +192,7 @@ void test_vlseg6e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_tum( @@ -213,7 +213,7 @@ void test_vlseg6e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_tum( @@ -234,7 +234,7 @@ void test_vlseg6e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vlseg6e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_tum( @@ -255,7 +255,7 @@ void test_vlseg6e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_tum( @@ -276,7 +276,7 @@ void test_vlseg6e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_tum( @@ -297,7 +297,7 @@ void test_vlseg6e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_tum( @@ -318,7 +318,7 @@ void test_vlseg6e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_tum( @@ -339,7 +339,7 @@ void test_vlseg6e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vlseg6e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_tumu( @@ -360,7 +360,7 @@ void test_vlseg6e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_tumu( @@ -381,7 +381,7 @@ void test_vlseg6e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_tumu( @@ -402,7 +402,7 @@ void test_vlseg6e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_tumu( @@ -423,7 +423,7 @@ void test_vlseg6e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_tumu( @@ -444,7 +444,7 @@ void test_vlseg6e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void 
test_vlseg6e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_tumu( @@ -465,7 +465,7 @@ void test_vlseg6e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_tumu( @@ -486,7 +486,7 @@ void test_vlseg6e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_tumu( @@ -507,7 +507,7 @@ void test_vlseg6e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_mu( @@ -528,7 +528,7 @@ void test_vlseg6e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_mu( @@ -549,7 +549,7 @@ void test_vlseg6e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret 
void // void test_vlseg6e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_mu( @@ -570,7 +570,7 @@ void test_vlseg6e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_mu( @@ -591,7 +591,7 @@ void test_vlseg6e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, vl); + return __riscv_vlseg6e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_mu( @@ -612,7 +612,7 @@ void test_vlseg6e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_mu( @@ -633,7 +633,7 @@ void test_vlseg6e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_mu( @@ -654,7 +654,7 @@ void test_vlseg6e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8mf2_mu(vuint8mf2_t 
*v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_mu( @@ -675,6 +675,6 @@ void test_vlseg6e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); + return __riscv_vlseg6e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8ff.c index b5e20ad1f1b0..025da3f1f7aa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg6e8ff.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t 
maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_tu( @@ -50,7 +50,7 @@ void test_vlseg6e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_tu( @@ -73,7 +73,7 @@ void test_vlseg6e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_tu( @@ -96,7 +96,7 @@ void test_vlseg6e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_tu( @@ -119,7 +119,7 @@ void test_vlseg6e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_tu( @@ -142,7 +142,7 @@ void test_vlseg6e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, 
vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_tu( @@ -165,7 +165,7 @@ void test_vlseg6e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_tu( @@ -188,7 +188,7 @@ void test_vlseg6e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8m1_tu(v0, v1, v2, v3, v4, v5, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_tum( @@ -211,7 +211,7 @@ void test_vlseg6e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_tum( @@ -234,7 +234,7 @@ void test_vlseg6e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_tum( @@ -257,7 +257,7 @@ void test_vlseg6e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf2_tum(vint8mf2_t *v0, 
vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_tum( @@ -280,7 +280,7 @@ void test_vlseg6e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_tum( @@ -303,7 +303,7 @@ void test_vlseg6e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_tum( @@ -326,7 +326,7 @@ void test_vlseg6e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_tum( @@ -349,7 +349,7 @@ void test_vlseg6e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_tum( 
@@ -372,7 +372,7 @@ void test_vlseg6e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_tumu( @@ -395,7 +395,7 @@ void test_vlseg6e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_tumu( @@ -418,7 +418,7 @@ void test_vlseg6e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, 
vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_tumu( @@ -441,7 +441,7 @@ void test_vlseg6e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_tumu( @@ -464,7 +464,7 @@ void test_vlseg6e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8m1_tumu(v0, v1, 
v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_tumu( @@ -487,7 +487,7 @@ void test_vlseg6e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_tumu( @@ -510,7 +510,7 @@ void test_vlseg6e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_tumu( @@ -533,7 +533,7 @@ void test_vlseg6e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // 
void test_vlseg6e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_tumu( @@ -556,7 +556,7 @@ void test_vlseg6e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_mu( @@ -579,7 +579,7 @@ void test_vlseg6e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, 
size_t vl) { - return vlseg6e8ff_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_mu( @@ -602,7 +602,7 @@ void test_vlseg6e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_mu( @@ -625,7 +625,7 @@ void test_vlseg6e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_mu( @@ -648,7 +648,7 @@ void test_vlseg6e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_mu( @@ -671,7 +671,7 @@ void test_vlseg6e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_mu( @@ -694,7 +694,7 @@ void test_vlseg6e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t 
maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_mu( @@ -717,7 +717,7 @@ void test_vlseg6e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); + return __riscv_vlseg6e8ff_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_mu( @@ -740,6 +740,6 @@ void test_vlseg6e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg6e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); 
+ return __riscv_vlseg6e8ff_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16.c index 46cfd9970fd8..b84e63774c8b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_tu( @@ -50,7 +50,7 @@ void test_vlseg7e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_tu( @@ -73,7 +73,7 @@ void test_vlseg7e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_tu( @@ -96,7 +96,7 @@ void test_vlseg7e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_tu( @@ -119,7 +119,7 @@ void 
test_vlseg7e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_tu( @@ -142,7 +142,7 @@ void test_vlseg7e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_tu( @@ -165,7 +165,7 @@ void test_vlseg7e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t 
maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_tu( @@ -188,7 +188,7 @@ void test_vlseg7e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_tu( @@ -211,7 +211,7 @@ void test_vlseg7e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_tum( @@ -234,7 +234,7 @@ void test_vlseg7e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_tum( @@ -257,7 +257,7 @@ void test_vlseg7e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_tum( @@ -280,7 +280,7 @@ void test_vlseg7e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_tum( @@ -303,7 +303,7 @@ void test_vlseg7e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_tum( @@ -326,7 +326,7 @@ void test_vlseg7e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // 
CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_tum( @@ -349,7 +349,7 @@ void test_vlseg7e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_tum( @@ -372,7 +372,7 @@ void test_vlseg7e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t 
maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_tum( @@ -395,7 +395,7 @@ void test_vlseg7e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_tum( @@ -418,7 +418,7 @@ void test_vlseg7e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16m1_tum(v0, v1, 
v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_tumu( @@ -441,7 +441,7 @@ void test_vlseg7e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_tumu( @@ -464,7 +464,7 @@ void test_vlseg7e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return 
__riscv_vlseg7e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_tumu( @@ -487,7 +487,7 @@ void test_vlseg7e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_tumu( @@ -510,7 +510,7 @@ void test_vlseg7e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: 
@test_vlseg7e16_v_i16mf2_tumu( @@ -533,7 +533,7 @@ void test_vlseg7e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_tumu( @@ -556,7 +556,7 @@ void test_vlseg7e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_tumu( @@ -579,7 +579,7 @@ void test_vlseg7e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_tumu( @@ -602,7 +602,7 @@ void test_vlseg7e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_tumu( @@ -625,7 +625,7 @@ void test_vlseg7e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t 
maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_mu( @@ -648,7 +648,7 @@ void test_vlseg7e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_mu( @@ -671,7 +671,7 @@ void test_vlseg7e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_mu( @@ -694,7 +694,7 @@ void test_vlseg7e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_mu( @@ -717,7 +717,7 @@ void test_vlseg7e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_mu( @@ -740,7 +740,7 @@ void test_vlseg7e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_mu( @@ -763,7 +763,7 @@ void test_vlseg7e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_mu( @@ -786,7 +786,7 @@ void test_vlseg7e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vlseg7e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_mu( @@ -809,7 +809,7 @@ void test_vlseg7e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_mu( @@ -832,6 +832,6 @@ void test_vlseg7e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t 
maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16ff.c index 6985447d1a12..3a7900ce2f4b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e16ff.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_tu( @@ -54,7 +54,7 @@ void test_vlseg7e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t 
*v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_tu( @@ -79,7 +79,7 @@ void test_vlseg7e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_tu( @@ -104,7 +104,7 @@ void test_vlseg7e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, 
vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_tu( @@ -129,7 +129,7 @@ void test_vlseg7e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_tu( @@ -154,7 +154,7 @@ void test_vlseg7e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_tu( @@ -179,7 +179,7 @@ void test_vlseg7e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_tu( @@ -204,7 +204,7 @@ void test_vlseg7e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_tu( @@ -229,7 +229,7 @@ void test_vlseg7e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_tum( @@ -254,7 +254,7 @@ void test_vlseg7e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_tum( @@ -279,7 +279,7 @@ void 
test_vlseg7e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_tum( @@ -304,7 +304,7 @@ void test_vlseg7e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_tum( @@ -329,7 +329,7 @@ void test_vlseg7e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // 
void test_vlseg7e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_tum( @@ -354,7 +354,7 @@ void test_vlseg7e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_tum( @@ -379,7 +379,7 @@ void test_vlseg7e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, 
vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_tum( @@ -404,7 +404,7 @@ void test_vlseg7e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_tum( @@ -429,7 +429,7 @@ void test_vlseg7e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t 
maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_tum( @@ -454,7 +454,7 @@ void test_vlseg7e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_tumu( @@ -479,7 +479,7 @@ void test_vlseg7e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - 
return vlseg7e16ff_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_tumu( @@ -504,7 +504,7 @@ void test_vlseg7e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_tumu( @@ -529,7 +529,7 @@ void test_vlseg7e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_tumu( @@ -554,7 +554,7 @@ void test_vlseg7e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_tumu( @@ -579,7 +579,7 @@ void test_vlseg7e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16mf2_tumu(v0, v1, v2, v3, 
v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_tumu( @@ -604,7 +604,7 @@ void test_vlseg7e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_tumu( @@ -629,7 +629,7 @@ void test_vlseg7e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // 
CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_tumu( @@ -654,7 +654,7 @@ void test_vlseg7e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_tumu( @@ -679,7 +679,7 @@ void test_vlseg7e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_mu( @@ -704,7 +704,7 @@ void test_vlseg7e16ff_v_u16m1_tumu(vuint16m1_t *v0, 
vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_mu( @@ -729,7 +729,7 @@ void test_vlseg7e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_mu( @@ -754,7 +754,7 @@ void test_vlseg7e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void 
test_vlseg7e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_mu( @@ -779,7 +779,7 @@ void test_vlseg7e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_mu( @@ -804,7 +804,7 @@ void test_vlseg7e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t 
*v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_mu( @@ -829,7 +829,7 @@ void test_vlseg7e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_mu( @@ -854,7 +854,7 @@ void test_vlseg7e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t 
maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_mu( @@ -879,7 +879,7 @@ void test_vlseg7e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_mu( @@ -904,6 +904,6 @@ void test_vlseg7e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff_v_u16m1_mu(v0, v1, v2, v3, v4, 
v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e16ff_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32.c index 16204899398a..2f0220bfd7c2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) { - return vlseg7e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_tu( @@ -50,7 +50,7 @@ void test_vlseg7e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t 
vl) { - return vlseg7e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_tu( @@ -73,7 +73,7 @@ void test_vlseg7e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_tu( @@ -96,7 +96,7 @@ void test_vlseg7e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_tu( @@ -119,7 +119,7 @@ void test_vlseg7e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_tu( @@ -142,7 +142,7 @@ void test_vlseg7e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_tum( @@ -165,7 +165,7 @@ void test_vlseg7e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t 
*v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) { - return vlseg7e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_tum( @@ -188,7 +188,7 @@ void test_vlseg7e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) { - return vlseg7e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_tum( @@ -211,7 +211,7 @@ void test_vlseg7e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t 
maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_tum( @@ -234,7 +234,7 @@ void test_vlseg7e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_tum( @@ -257,7 +257,7 @@ void test_vlseg7e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, vl); + return __riscv_vlseg7e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_tum( @@ -280,7 +280,7 @@ void test_vlseg7e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_tumu( @@ -303,7 +303,7 @@ void test_vlseg7e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) { - return vlseg7e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_tumu( @@ -326,7 +326,7 @@ void test_vlseg7e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) { - return vlseg7e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_tumu( @@ -349,7 +349,7 @@ void test_vlseg7e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_tumu( @@ -372,7 +372,7 @@ void test_vlseg7e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vlseg7e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_tumu( @@ -395,7 +395,7 @@ void test_vlseg7e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_tumu( @@ -418,7 +418,7 @@ void test_vlseg7e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t 
maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_mu( @@ -441,7 +441,7 @@ void test_vlseg7e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) { - return vlseg7e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_mu( @@ -464,7 +464,7 @@ void test_vlseg7e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) { - return vlseg7e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_mu( @@ -487,7 +487,7 @@ void test_vlseg7e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_mu( @@ -510,7 +510,7 @@ void test_vlseg7e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_mu( @@ -533,7 +533,7 @@ void test_vlseg7e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_mu( @@ -556,6 +556,6 @@ void test_vlseg7e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32ff.c 
index 2c93c5769322..e1992870743b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e32ff.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_tu( @@ -54,7 +54,7 @@ void test_vlseg7e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_tu( @@ -79,7 +79,7 @@ void 
test_vlseg7e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_tu( @@ -104,7 +104,7 @@ void test_vlseg7e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_tu( @@ -129,7 +129,7 @@ void test_vlseg7e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t 
*v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_tu( @@ -154,7 +154,7 @@ void test_vlseg7e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_tum( @@ -179,7 +179,7 @@ void test_vlseg7e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, 
vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_tum( @@ -204,7 +204,7 @@ void test_vlseg7e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_tum( @@ -229,7 +229,7 @@ void test_vlseg7e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32mf2_tum(v0, 
v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_tum( @@ -254,7 +254,7 @@ void test_vlseg7e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_tum( @@ -279,7 +279,7 @@ void test_vlseg7e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return 
__riscv_vlseg7e32ff_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_tum( @@ -304,7 +304,7 @@ void test_vlseg7e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_tumu( @@ -329,7 +329,7 @@ void test_vlseg7e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_tumu( @@ -354,7 +354,7 @@ void test_vlseg7e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_tumu( @@ -379,7 +379,7 @@ void test_vlseg7e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_tumu( @@ -404,7 +404,7 @@ 
void test_vlseg7e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_tumu( @@ -429,7 +429,7 @@ void test_vlseg7e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_tumu( @@ -454,7 +454,7 @@ void test_vlseg7e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void 
test_vlseg7e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_mu( @@ -479,7 +479,7 @@ void test_vlseg7e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_mu( @@ -504,7 +504,7 @@ void test_vlseg7e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, 
vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_mu( @@ -529,7 +529,7 @@ void test_vlseg7e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_mu( @@ -554,7 +554,7 @@ void test_vlseg7e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t 
maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_mu( @@ -579,7 +579,7 @@ void test_vlseg7e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_mu( @@ -604,6 +604,6 @@ void test_vlseg7e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff_v_u32m1_mu(v0, 
v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e32ff_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64.c index d62af5b266e2..7455dc25cdb9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) { - return vlseg7e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_tu( @@ -50,7 +50,7 @@ void test_vlseg7e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) { - return 
vlseg7e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_tu( @@ -73,7 +73,7 @@ void test_vlseg7e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { - return vlseg7e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_tum( @@ -96,7 +96,7 @@ void test_vlseg7e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) { - return vlseg7e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_tum( @@ -119,7 +119,7 @@ void test_vlseg7e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) { - return vlseg7e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_tum( @@ -142,7 +142,7 @@ void test_vlseg7e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { - return vlseg7e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_tumu( @@ -165,7 +165,7 @@ void test_vlseg7e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void 
test_vlseg7e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) { - return vlseg7e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_tumu( @@ -188,7 +188,7 @@ void test_vlseg7e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) { - return vlseg7e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_tumu( @@ -211,7 +211,7 @@ void test_vlseg7e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t 
maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { - return vlseg7e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_mu( @@ -234,7 +234,7 @@ void test_vlseg7e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) { - return vlseg7e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_mu( @@ -257,7 +257,7 @@ void test_vlseg7e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) { - return vlseg7e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_mu( @@ -280,6 +280,6 @@ void test_vlseg7e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { - return vlseg7e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64ff.c index ffa3a20501d2..20ed4797082e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e64ff.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) { - return 
vlseg7e64ff_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_tu( @@ -54,7 +54,7 @@ void test_vlseg7e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_tu( @@ -79,7 +79,7 @@ void test_vlseg7e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_tum( @@ -104,7 +104,7 @@ void test_vlseg7e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_tum( @@ -129,7 +129,7 @@ void test_vlseg7e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_tum( @@ -154,7 
+154,7 @@ void test_vlseg7e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_tumu( @@ -179,7 +179,7 @@ void test_vlseg7e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_tumu( @@ -204,7 +204,7 @@ void test_vlseg7e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void 
test_vlseg7e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_tumu( @@ -229,7 +229,7 @@ void test_vlseg7e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_mu( @@ -254,7 +254,7 @@ void test_vlseg7e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, 
vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_mu( @@ -279,7 +279,7 @@ void test_vlseg7e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_mu( @@ -304,6 +304,6 @@ void test_vlseg7e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t 
maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e64ff_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8.c index df0a095cecde..bacb1f85b02f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8.c @@ -26,7 +26,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_tu( @@ -49,7 +49,7 @@ void test_vlseg7e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t 
maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_tu( @@ -72,7 +72,7 @@ void test_vlseg7e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_tu( @@ -95,7 +95,7 @@ void test_vlseg7e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } 
// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_tu( @@ -118,7 +118,7 @@ void test_vlseg7e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_tu( @@ -141,7 +141,7 @@ void test_vlseg7e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_tu( @@ -164,7 +164,7 @@ void test_vlseg7e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t 
*v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_tu( @@ -187,7 +187,7 @@ void test_vlseg7e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_tum( @@ -210,7 +210,7 @@ void test_vlseg7e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_tum( @@ -233,7 +233,7 @@ void test_vlseg7e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_tum( @@ -256,7 +256,7 @@ void test_vlseg7e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: 
@test_vlseg7e8_v_i8m1_tum( @@ -279,7 +279,7 @@ void test_vlseg7e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_tum( @@ -302,7 +302,7 @@ void test_vlseg7e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_tum( @@ -325,7 +325,7 @@ void test_vlseg7e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, 
vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_tum( @@ -348,7 +348,7 @@ void test_vlseg7e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_tum( @@ -371,7 +371,7 @@ void test_vlseg7e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) { - return 
vlseg7e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_tumu( @@ -394,7 +394,7 @@ void test_vlseg7e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_tumu( @@ -417,7 +417,7 @@ void test_vlseg7e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_tumu( @@ -440,7 +440,7 @@ void test_vlseg7e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_tumu( @@ -463,7 +463,7 @@ void test_vlseg7e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_tumu( @@ -486,7 +486,7 @@ void test_vlseg7e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void 
test_vlseg7e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_tumu( @@ -509,7 +509,7 @@ void test_vlseg7e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_tumu( @@ -532,7 +532,7 @@ void test_vlseg7e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, 
vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_tumu( @@ -555,7 +555,7 @@ void test_vlseg7e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_mu( @@ -578,7 +578,7 @@ void test_vlseg7e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_mu( @@ -601,7 +601,7 @@ void test_vlseg7e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_mu( @@ -624,7 +624,7 @@ void test_vlseg7e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_mu( @@ -647,7 +647,7 @@ void 
test_vlseg7e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_mu( @@ -670,7 +670,7 @@ void test_vlseg7e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_mu( @@ -693,7 +693,7 @@ void test_vlseg7e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t 
maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_mu( @@ -716,7 +716,7 @@ void test_vlseg7e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_mu( @@ -739,6 +739,6 @@ void test_vlseg7e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); + return __riscv_vlseg7e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8ff.c index 2841b0606afd..872470af6130 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg7e8ff.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_tu( @@ -54,7 +54,7 @@ void test_vlseg7e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, 
v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_tu( @@ -79,7 +79,7 @@ void test_vlseg7e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_tu( @@ -104,7 +104,7 @@ void test_vlseg7e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } 
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_tu( @@ -129,7 +129,7 @@ void test_vlseg7e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_tu( @@ -154,7 +154,7 @@ void test_vlseg7e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_tu( @@ -179,7 +179,7 @@ void test_vlseg7e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf2_tu(vuint8mf2_t 
*v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_tu( @@ -204,7 +204,7 @@ void test_vlseg7e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_tum( @@ -229,7 +229,7 @@ void test_vlseg7e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t 
maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_tum( @@ -254,7 +254,7 @@ void test_vlseg7e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_tum( @@ -279,7 +279,7 @@ void test_vlseg7e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_tum( @@ -304,7 +304,7 @@ void test_vlseg7e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_tum( @@ -329,7 +329,7 @@ void test_vlseg7e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_tum( @@ -354,7 +354,7 @@ void test_vlseg7e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_tum( @@ -379,7 +379,7 @@ void test_vlseg7e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_tum( @@ -404,7 +404,7 @@ void 
test_vlseg7e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_tumu( @@ -429,7 +429,7 @@ void test_vlseg7e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_tumu( @@ -454,7 +454,7 @@ void test_vlseg7e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, 
vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_tumu( @@ -479,7 +479,7 @@ void test_vlseg7e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_tumu( @@ -504,7 +504,7 @@ void test_vlseg7e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, 
vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_tumu( @@ -529,7 +529,7 @@ void test_vlseg7e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_tumu( @@ -554,7 +554,7 @@ void test_vlseg7e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_tumu( @@ -579,7 +579,7 @@ void test_vlseg7e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_tumu( @@ -604,7 +604,7 @@ void test_vlseg7e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_mu( @@ -629,7 +629,7 @@ void test_vlseg7e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_mu( @@ -654,7 +654,7 @@ void test_vlseg7e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_mu( @@ -679,7 +679,7 @@ void 
test_vlseg7e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_mu( @@ -704,7 +704,7 @@ void test_vlseg7e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_mu( @@ -729,7 +729,7 @@ void test_vlseg7e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t 
*v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_mu( @@ -754,7 +754,7 @@ void test_vlseg7e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_mu( @@ -779,7 +779,7 @@ void test_vlseg7e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t 
maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_mu( @@ -804,6 +804,6 @@ void test_vlseg7e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg7e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); + return __riscv_vlseg7e8ff_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16.c index 05f4ad9645e2..915e48c5b5f9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t 
*v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_tu( @@ -54,7 +54,7 @@ void test_vlseg8e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_tu( @@ -79,7 +79,7 @@ void test_vlseg8e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t 
maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_tu( @@ -104,7 +104,7 @@ void test_vlseg8e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_tu( @@ -129,7 +129,7 @@ void test_vlseg8e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { - 
return vlseg8e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_tu( @@ -154,7 +154,7 @@ void test_vlseg8e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_tu( @@ -179,7 +179,7 @@ void test_vlseg8e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, vl); + return __riscv_vlseg8e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_tu( @@ -204,7 +204,7 @@ void test_vlseg8e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_tu( @@ -229,7 +229,7 @@ void test_vlseg8e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_tum( @@ -254,7 +254,7 @@ void test_vlseg8e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_tum( @@ -279,7 +279,7 @@ void test_vlseg8e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_tum( @@ -304,7 +304,7 @@ void test_vlseg8e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_tum( @@ -329,7 +329,7 @@ void test_vlseg8e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // 
CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_tum( @@ -354,7 +354,7 @@ void test_vlseg8e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_tum( @@ -379,7 +379,7 @@ void test_vlseg8e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_tum( @@ -404,7 +404,7 @@ void 
test_vlseg8e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_tum( @@ -429,7 +429,7 @@ void test_vlseg8e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_tum( @@ -454,7 +454,7 @@ void test_vlseg8e16_v_u16mf2_tum(vuint16mf2_t *v0, 
vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_tumu( @@ -479,7 +479,7 @@ void test_vlseg8e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_tumu( @@ -504,7 +504,7 @@ void test_vlseg8e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // 
CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_tumu( @@ -529,7 +529,7 @@ void test_vlseg8e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_tumu( @@ -554,7 +554,7 @@ void test_vlseg8e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void 
// void test_vlseg8e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_tumu( @@ -579,7 +579,7 @@ void test_vlseg8e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_tumu( @@ -604,7 +604,7 @@ void test_vlseg8e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_tumu( @@ -629,7 +629,7 @@ void test_vlseg8e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_tumu( @@ -654,7 +654,7 @@ void test_vlseg8e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t 
*v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_tumu( @@ -679,7 +679,7 @@ void test_vlseg8e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_mu( @@ -704,7 +704,7 @@ void test_vlseg8e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t 
mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_mu( @@ -729,7 +729,7 @@ void test_vlseg8e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_mu( @@ -754,7 +754,7 @@ void test_vlseg8e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, 
vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_mu( @@ -779,7 +779,7 @@ void test_vlseg8e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_mu( @@ -804,7 +804,7 @@ void test_vlseg8e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t 
maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_mu( @@ -829,7 +829,7 @@ void test_vlseg8e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_mu( @@ -854,7 +854,7 @@ void test_vlseg8e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const 
uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_mu( @@ -879,7 +879,7 @@ void test_vlseg8e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_mu( @@ -904,6 +904,6 @@ void test_vlseg8e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16m1_mu(v0, v1, v2, 
v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16ff.c index 7bcb292b058e..ac7dc7f382c0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e16ff.c @@ -31,7 +31,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_tu( @@ -58,7 +58,7 @@ void test_vlseg8e16ff_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t 
maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_tu( @@ -85,7 +85,7 @@ void test_vlseg8e16ff_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_tu( @@ -112,7 +112,7 @@ void test_vlseg8e16ff_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, 
vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_tu( @@ -139,7 +139,7 @@ void test_vlseg8e16ff_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_tu( @@ -166,7 +166,7 @@ void test_vlseg8e16ff_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t 
maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_tu( @@ -193,7 +193,7 @@ void test_vlseg8e16ff_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_tu( @@ -220,7 +220,7 @@ void test_vlseg8e16ff_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, 
vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_tu( @@ -247,7 +247,7 @@ void test_vlseg8e16ff_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_tum( @@ -274,7 +274,7 @@ void test_vlseg8e16ff_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t 
maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_tum( @@ -301,7 +301,7 @@ void test_vlseg8e16ff_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_tum( @@ -328,7 +328,7 @@ void test_vlseg8e16ff_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t 
maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_tum( @@ -355,7 +355,7 @@ void test_vlseg8e16ff_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_tum( @@ -382,7 +382,7 @@ void test_vlseg8e16ff_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t 
maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_tum( @@ -409,7 +409,7 @@ void test_vlseg8e16ff_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_tum( @@ -436,7 +436,7 @@ void test_vlseg8e16ff_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t 
maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_tum( @@ -463,7 +463,7 @@ void test_vlseg8e16ff_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_tum( @@ -490,7 +490,7 @@ void test_vlseg8e16ff_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t 
maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_tumu( @@ -517,7 +517,7 @@ void test_vlseg8e16ff_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_tumu( @@ -544,7 +544,7 @@ void test_vlseg8e16ff_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, 
vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_tumu( @@ -571,7 +571,7 @@ void test_vlseg8e16ff_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_tumu( @@ -598,7 +598,7 @@ void test_vlseg8e16ff_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t 
maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_tumu( @@ -625,7 +625,7 @@ void test_vlseg8e16ff_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_tumu( @@ -652,7 +652,7 @@ void test_vlseg8e16ff_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t 
maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_tumu( @@ -679,7 +679,7 @@ void test_vlseg8e16ff_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_tumu( @@ -706,7 +706,7 @@ void test_vlseg8e16ff_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, 
vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_tumu( @@ -733,7 +733,7 @@ void test_vlseg8e16ff_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_mu( @@ -760,7 +760,7 @@ void test_vlseg8e16ff_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t 
*v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_mu( @@ -787,7 +787,7 @@ void test_vlseg8e16ff_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_mu( @@ -814,7 +814,7 @@ void test_vlseg8e16ff_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, 
vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_mu( @@ -841,7 +841,7 @@ void test_vlseg8e16ff_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_mu( @@ -868,7 +868,7 @@ void test_vlseg8e16ff_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, 
vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_mu( @@ -895,7 +895,7 @@ void test_vlseg8e16ff_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_mu( @@ -922,7 +922,7 @@ void test_vlseg8e16ff_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, 
vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_mu( @@ -949,7 +949,7 @@ void test_vlseg8e16ff_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_mu( @@ -976,6 +976,6 @@ void test_vlseg8e16ff_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e16ff_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, 
vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e16ff_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32.c index 86ade4caf189..f375335ee8c7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) { - return vlseg8e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_tu( 
@@ -54,7 +54,7 @@ void test_vlseg8e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) { - return vlseg8e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_tu( @@ -79,7 +79,7 @@ void test_vlseg8e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_tu( @@ -104,7 +104,7 @@ void test_vlseg8e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void 
test_vlseg8e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_tu( @@ -129,7 +129,7 @@ void test_vlseg8e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_tu( @@ -154,7 +154,7 @@ void test_vlseg8e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, 
vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_tum( @@ -179,7 +179,7 @@ void test_vlseg8e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) { - return vlseg8e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_tum( @@ -204,7 +204,7 @@ void test_vlseg8e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t 
maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) { - return vlseg8e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_tum( @@ -229,7 +229,7 @@ void test_vlseg8e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_tum( @@ -254,7 +254,7 @@ void test_vlseg8e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t 
maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_tum( @@ -279,7 +279,7 @@ void test_vlseg8e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_tum( @@ -304,7 +304,7 @@ void test_vlseg8e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const 
uint32_t *base, size_t vl) { - return vlseg8e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_tumu( @@ -329,7 +329,7 @@ void test_vlseg8e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) { - return vlseg8e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_tumu( @@ -354,7 +354,7 @@ void test_vlseg8e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) { - 
return vlseg8e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_tumu( @@ -379,7 +379,7 @@ void test_vlseg8e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_tumu( @@ -404,7 +404,7 @@ void test_vlseg8e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_tumu( @@ -429,7 +429,7 @@ void test_vlseg8e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_tumu( @@ -454,7 +454,7 @@ void test_vlseg8e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, vl); + return __riscv_vlseg8e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_mu( @@ -479,7 +479,7 @@ void test_vlseg8e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) { - return vlseg8e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_mu( @@ -504,7 +504,7 @@ void test_vlseg8e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) { - return vlseg8e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return 
__riscv_vlseg8e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_mu( @@ -529,7 +529,7 @@ void test_vlseg8e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_mu( @@ -554,7 +554,7 @@ void test_vlseg8e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_mu( @@ -579,7 +579,7 @@ void test_vlseg8e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_mu( @@ -604,6 +604,6 @@ void test_vlseg8e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32ff.c index 69389c10817f..db1003af224d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e32ff.c @@ -31,7 +31,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_tu( @@ -58,7 +58,7 @@ void test_vlseg8e32ff_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_tu( @@ -85,7 +85,7 @@ void test_vlseg8e32ff_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_tu( @@ -112,7 +112,7 @@ void test_vlseg8e32ff_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return 
__riscv_vlseg8e32ff_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_tu( @@ -139,7 +139,7 @@ void test_vlseg8e32ff_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_tu( @@ -166,7 +166,7 @@ void test_vlseg8e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_u32m1_tu(v0, v1, v2, v3, v4, 
v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_tum( @@ -193,7 +193,7 @@ void test_vlseg8e32ff_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_tum( @@ -220,7 +220,7 @@ void test_vlseg8e32ff_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return 
__riscv_vlseg8e32ff_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_tum( @@ -247,7 +247,7 @@ void test_vlseg8e32ff_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_tum( @@ -274,7 +274,7 @@ void test_vlseg8e32ff_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return 
__riscv_vlseg8e32ff_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_tum( @@ -301,7 +301,7 @@ void test_vlseg8e32ff_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_tum( @@ -328,7 +328,7 @@ void test_vlseg8e32ff_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, 
vl); + return __riscv_vlseg8e32ff_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_tumu( @@ -355,7 +355,7 @@ void test_vlseg8e32ff_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_tumu( @@ -382,7 +382,7 @@ void test_vlseg8e32ff_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_tumu( @@ -409,7 +409,7 @@ void test_vlseg8e32ff_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_tumu( @@ -436,7 +436,7 @@ void test_vlseg8e32ff_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_tumu( @@ -463,7 +463,7 @@ void test_vlseg8e32ff_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_tumu( @@ -490,7 +490,7 @@ void test_vlseg8e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_mu( @@ -517,7 +517,7 @@ void test_vlseg8e32ff_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_mu( @@ -544,7 +544,7 @@ void test_vlseg8e32ff_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_mu( @@ -571,7 +571,7 @@ void test_vlseg8e32ff_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_mu( @@ -598,7 +598,7 @@ void test_vlseg8e32ff_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_mu( @@ -625,7 +625,7 @@ void test_vlseg8e32ff_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_mu( @@ -652,6 +652,6 @@ void test_vlseg8e32ff_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e32ff_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e32ff_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64.c index 1c5eba589018..b6d7db2358fd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) { - return vlseg8e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_tu( @@ -54,7 +54,7 @@ void test_vlseg8e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t 
maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) { - return vlseg8e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_tu( @@ -79,7 +79,7 @@ void test_vlseg8e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { - return vlseg8e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_tum( @@ -104,7 +104,7 @@ void test_vlseg8e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) { - return vlseg8e64_v_f64m1_tum(v0, v1, v2, v3, v4, 
v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_tum( @@ -129,7 +129,7 @@ void test_vlseg8e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) { - return vlseg8e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_tum( @@ -154,7 +154,7 @@ void test_vlseg8e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { - return vlseg8e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, vl); + return __riscv_vlseg8e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_tumu( @@ -179,7 +179,7 @@ void test_vlseg8e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) { - return vlseg8e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_tumu( @@ -204,7 +204,7 @@ void test_vlseg8e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) { - return vlseg8e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e64_v_i64m1_tumu(v0, v1, v2, v3, v4, 
v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_tumu( @@ -229,7 +229,7 @@ void test_vlseg8e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { - return vlseg8e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_mu( @@ -254,7 +254,7 @@ void test_vlseg8e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) { - return vlseg8e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_mu( @@ -279,7 +279,7 @@ void test_vlseg8e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) { - return vlseg8e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_mu( @@ -304,6 +304,6 @@ void test_vlseg8e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { - return vlseg8e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64ff.c index 359eac8c1423..0282f8a0b904 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e64ff.c @@ -31,7 +31,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e64ff_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_tu( @@ -58,7 +58,7 @@ void test_vlseg8e64ff_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, 
base, new_vl, vl); + return __riscv_vlseg8e64ff_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_tu( @@ -85,7 +85,7 @@ void test_vlseg8e64ff_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e64ff_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_tum( @@ -112,7 +112,7 @@ void test_vlseg8e64ff_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return 
__riscv_vlseg8e64ff_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_tum( @@ -139,7 +139,7 @@ void test_vlseg8e64ff_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e64ff_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_tum( @@ -166,7 +166,7 @@ void test_vlseg8e64ff_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return 
__riscv_vlseg8e64ff_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_tumu( @@ -193,7 +193,7 @@ void test_vlseg8e64ff_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e64ff_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_tumu( @@ -220,7 +220,7 @@ void test_vlseg8e64ff_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return 
__riscv_vlseg8e64ff_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_tumu( @@ -247,7 +247,7 @@ void test_vlseg8e64ff_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e64ff_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_mu( @@ -274,7 +274,7 @@ void test_vlseg8e64ff_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); 
+ return __riscv_vlseg8e64ff_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_mu( @@ -301,7 +301,7 @@ void test_vlseg8e64ff_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e64ff_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_mu( @@ -328,6 +328,6 @@ void test_vlseg8e64ff_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e64ff_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return 
__riscv_vlseg8e64ff_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8.c index 43304cbf0c66..f251fc600b98 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8.c @@ -28,7 +28,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_tu( @@ -53,7 +53,7 @@ void test_vlseg8e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_tu( @@ -78,7 +78,7 @@ void test_vlseg8e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_tu( @@ -103,7 +103,7 @@ void test_vlseg8e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_tu( @@ -128,7 +128,7 @@ void test_vlseg8e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_tu( @@ -153,7 +153,7 @@ void test_vlseg8e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_tu( @@ -178,7 +178,7 @@ void 
test_vlseg8e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_tu( @@ -203,7 +203,7 @@ void test_vlseg8e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_tum( @@ -228,7 +228,7 @@ void test_vlseg8e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, 
vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_tum( @@ -253,7 +253,7 @@ void test_vlseg8e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_tum( @@ -278,7 +278,7 @@ void test_vlseg8e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t 
maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_tum( @@ -303,7 +303,7 @@ void test_vlseg8e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_tum( @@ -328,7 +328,7 @@ void test_vlseg8e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t 
maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_tum( @@ -353,7 +353,7 @@ void test_vlseg8e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_tum( @@ -378,7 +378,7 @@ void test_vlseg8e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) { - return 
vlseg8e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_tum( @@ -403,7 +403,7 @@ void test_vlseg8e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_tumu( @@ -428,7 +428,7 @@ void test_vlseg8e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_tumu( @@ -453,7 +453,7 @@ void test_vlseg8e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_tumu( @@ -478,7 +478,7 @@ void test_vlseg8e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_tumu( @@ -503,7 +503,7 @@ void test_vlseg8e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_tumu( @@ -528,7 +528,7 @@ void test_vlseg8e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: 
@test_vlseg8e8_v_u8mf4_tumu( @@ -553,7 +553,7 @@ void test_vlseg8e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_tumu( @@ -578,7 +578,7 @@ void test_vlseg8e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_tumu( @@ -603,7 +603,7 @@ void 
test_vlseg8e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_mu( @@ -628,7 +628,7 @@ void test_vlseg8e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_mu( @@ -653,7 +653,7 @@ void test_vlseg8e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: ret void // void 
test_vlseg8e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_mu( @@ -678,7 +678,7 @@ void test_vlseg8e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_mu( @@ -703,7 +703,7 @@ void test_vlseg8e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t 
*v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_mu( @@ -728,7 +728,7 @@ void test_vlseg8e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_mu( @@ -753,7 +753,7 @@ void test_vlseg8e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, 
vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_mu( @@ -778,7 +778,7 @@ void test_vlseg8e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_mu( @@ -803,6 +803,6 @@ void test_vlseg8e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t 
vl) { - return vlseg8e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); + return __riscv_vlseg8e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8ff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8ff.c index 4d5556f6e22f..15cadec9278b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8ff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlseg8e8ff.c @@ -31,7 +31,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_tu( @@ -58,7 +58,7 @@ void test_vlseg8e8ff_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t 
maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_tu( @@ -85,7 +85,7 @@ void test_vlseg8e8ff_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_tu( @@ -112,7 +112,7 @@ void test_vlseg8e8ff_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const 
int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_tu( @@ -139,7 +139,7 @@ void test_vlseg8e8ff_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_tu( @@ -166,7 +166,7 @@ void test_vlseg8e8ff_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf4_tu(v0, v1, 
v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_tu( @@ -193,7 +193,7 @@ void test_vlseg8e8ff_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_tu( @@ -220,7 +220,7 @@ void test_vlseg8e8ff_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_tum( @@ -247,7 +247,7 @@ void test_vlseg8e8ff_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_tum( @@ -274,7 +274,7 @@ void test_vlseg8e8ff_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return 
__riscv_vlseg8e8ff_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_tum( @@ -301,7 +301,7 @@ void test_vlseg8e8ff_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_tum( @@ -328,7 +328,7 @@ void test_vlseg8e8ff_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_tum( @@ -355,7 +355,7 @@ void test_vlseg8e8ff_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_tum( @@ -382,7 +382,7 @@ void test_vlseg8e8ff_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_tum( @@ -409,7 +409,7 @@ void test_vlseg8e8ff_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_tum( @@ -436,7 +436,7 @@ void test_vlseg8e8ff_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_tumu( @@ -463,7 +463,7 @@ void test_vlseg8e8ff_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_tumu( @@ -490,7 +490,7 @@ void test_vlseg8e8ff_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_tumu( @@ -517,7 +517,7 @@ void test_vlseg8e8ff_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_tumu( @@ -544,7 +544,7 @@ void test_vlseg8e8ff_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg8e8ff_v_u8mf8_tumu( @@ -571,7 +571,7 @@ void test_vlseg8e8ff_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_tumu( @@ -598,7 +598,7 @@ void test_vlseg8e8ff_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: 
@test_vlseg8e8ff_v_u8mf2_tumu( @@ -625,7 +625,7 @@ void test_vlseg8e8ff_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_tumu( @@ -652,7 +652,7 @@ void test_vlseg8e8ff_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_mu( @@ 
-679,7 +679,7 @@ void test_vlseg8e8ff_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_mu( @@ -706,7 +706,7 @@ void test_vlseg8e8ff_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_mu( @@ -733,7 +733,7 @@ void test_vlseg8e8ff_v_i8mf4_mu(vint8mf4_t 
*v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_mu( @@ -760,7 +760,7 @@ void test_vlseg8e8ff_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_mu( @@ -787,7 +787,7 @@ void test_vlseg8e8ff_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void 
test_vlseg8e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_mu( @@ -814,7 +814,7 @@ void test_vlseg8e8ff_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_mu( @@ -841,7 +841,7 @@ void test_vlseg8e8ff_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void 
test_vlseg8e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_mu( @@ -868,6 +868,6 @@ void test_vlseg8e8ff_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlseg8e8ff_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); + return __riscv_vlseg8e8ff_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e16.c index 74eaba728244..5610ab7c41b3 100644 
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e16.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_tu( @@ -30,7 +30,7 @@ void test_vlsseg2e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_tu( @@ -43,7 +43,7 @@ void test_vlsseg2e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_tu( @@ -56,7 +56,7 @@ void test_vlsseg2e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t 
bstride, size_t vl) { - return vlsseg2e16_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_tu( @@ -69,7 +69,7 @@ void test_vlsseg2e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_tu( @@ -82,7 +82,7 @@ void test_vlsseg2e16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_tu( @@ -95,7 +95,7 @@ void test_vlsseg2e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_tu( @@ -108,7 +108,7 @@ void test_vlsseg2e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t m // CHECK-RV64-NEXT: ret void // void 
test_vlsseg2e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_tu( @@ -121,7 +121,7 @@ void test_vlsseg2e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maske // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_tu( @@ -134,7 +134,7 @@ void test_vlsseg2e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maske // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_tu( @@ -147,7 +147,7 @@ void test_vlsseg2e16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maske // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_tu( @@ -160,7 +160,7 @@ void 
test_vlsseg2e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_tu( @@ -173,7 +173,7 @@ void test_vlsseg2e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_tu( @@ -186,7 +186,7 @@ void test_vlsseg2e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_tu( @@ -199,7 +199,7 @@ void test_vlsseg2e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m4_tu(v0, v1, 
maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_tum( @@ -212,7 +212,7 @@ void test_vlsseg2e16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_tum( @@ -225,7 +225,7 @@ void test_vlsseg2e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_tum( @@ -238,7 +238,7 @@ void test_vlsseg2e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_tum( @@ -251,7 +251,7 @@ void test_vlsseg2e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t 
*v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_tum( @@ -264,7 +264,7 @@ void test_vlsseg2e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_tum( @@ -277,7 +277,7 @@ void test_vlsseg2e16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_tum( @@ -290,7 +290,7 @@ void test_vlsseg2e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } 
// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_tum( @@ -303,7 +303,7 @@ void test_vlsseg2e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_tum( @@ -316,7 +316,7 @@ void test_vlsseg2e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_tum( @@ -329,7 +329,7 @@ void test_vlsseg2e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_tum( @@ -342,7 +342,7 @@ void test_vlsseg2e16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t 
*base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_tum( @@ -355,7 +355,7 @@ void test_vlsseg2e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_tum( @@ -368,7 +368,7 @@ void test_vlsseg2e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_tum( @@ -381,7 +381,7 @@ void test_vlsseg2e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_tum( @@ -394,7 +394,7 @@ void 
test_vlsseg2e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_tumu( @@ -407,7 +407,7 @@ void test_vlsseg2e16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_tumu( @@ -420,7 +420,7 @@ void test_vlsseg2e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_tumu( @@ -433,7 +433,7 @@ void test_vlsseg2e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, 
size_t vl) { - return vlsseg2e16_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_tumu( @@ -446,7 +446,7 @@ void test_vlsseg2e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_tumu( @@ -459,7 +459,7 @@ void test_vlsseg2e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_tumu( @@ -472,7 +472,7 @@ void test_vlsseg2e16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_tumu( @@ -485,7 +485,7 @@ void 
test_vlsseg2e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_tumu( @@ -498,7 +498,7 @@ void test_vlsseg2e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_tumu( @@ -511,7 +511,7 @@ void test_vlsseg2e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_tumu( @@ -524,7 +524,7 @@ void test_vlsseg2e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg2e16_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_tumu( @@ -537,7 +537,7 @@ void test_vlsseg2e16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_tumu( @@ -550,7 +550,7 @@ void test_vlsseg2e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_tumu( @@ -563,7 +563,7 @@ void test_vlsseg2e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_tumu( @@ -576,7 +576,7 @@ void 
test_vlsseg2e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_tumu( @@ -589,7 +589,7 @@ void test_vlsseg2e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_mu( @@ -602,7 +602,7 @@ void test_vlsseg2e16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_mu( @@ -615,7 +615,7 @@ void test_vlsseg2e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg2e16_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_mu( @@ -628,7 +628,7 @@ void test_vlsseg2e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_mu( @@ -641,7 +641,7 @@ void test_vlsseg2e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_mu( @@ -654,7 +654,7 @@ void test_vlsseg2e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_mu( @@ -667,7 +667,7 @@ void test_vlsseg2e16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, 
vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_mu( @@ -680,7 +680,7 @@ void test_vlsseg2e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_mu( @@ -693,7 +693,7 @@ void test_vlsseg2e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_mu( @@ -706,7 +706,7 @@ void test_vlsseg2e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return 
__riscv_vlsseg2e16_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_mu( @@ -719,7 +719,7 @@ void test_vlsseg2e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_mu( @@ -732,7 +732,7 @@ void test_vlsseg2e16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_mu( @@ -745,7 +745,7 @@ void test_vlsseg2e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_mu( @@ -758,7 +758,7 @@ void test_vlsseg2e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m1_mu(vuint16m1_t *v0, 
vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_mu( @@ -771,7 +771,7 @@ void test_vlsseg2e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_mu( @@ -784,6 +784,6 @@ void test_vlsseg2e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e16_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32.c index 5972b2aa3c00..cb52756a7b12 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t 
maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_tu( @@ -30,7 +30,7 @@ void test_vlsseg2e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_tu( @@ -43,7 +43,7 @@ void test_vlsseg2e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_tu( @@ -56,7 +56,7 @@ void test_vlsseg2e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_tu( @@ -69,7 +69,7 @@ void test_vlsseg2e32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t // 
CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_tu( @@ -82,7 +82,7 @@ void test_vlsseg2e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_tu( @@ -95,7 +95,7 @@ void test_vlsseg2e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maske // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_tu( @@ -108,7 +108,7 @@ void test_vlsseg2e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maske // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tu( @@ -121,7 
+121,7 @@ void test_vlsseg2e32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maske // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_tu( @@ -134,7 +134,7 @@ void test_vlsseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_tu( @@ -147,7 +147,7 @@ void test_vlsseg2e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_tu( @@ -160,7 +160,7 @@ void test_vlsseg2e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return 
__riscv_vlsseg2e32_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_tum( @@ -173,7 +173,7 @@ void test_vlsseg2e32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_tum( @@ -186,7 +186,7 @@ void test_vlsseg2e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_tum( @@ -199,7 +199,7 @@ void test_vlsseg2e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_tum( @@ -212,7 +212,7 @@ void test_vlsseg2e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void 
test_vlsseg2e32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_tum( @@ -225,7 +225,7 @@ void test_vlsseg2e32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_tum( @@ -238,7 +238,7 @@ void test_vlsseg2e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_tum( @@ -251,7 +251,7 @@ void test_vlsseg2e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m2_tum(v0, v1, mask, 
maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_tum( @@ -264,7 +264,7 @@ void test_vlsseg2e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tum( @@ -277,7 +277,7 @@ void test_vlsseg2e32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_tum( @@ -290,7 +290,7 @@ void test_vlsseg2e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_tum( @@ -303,7 +303,7 @@ void test_vlsseg2e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, 
vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_tum( @@ -316,7 +316,7 @@ void test_vlsseg2e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_tumu( @@ -329,7 +329,7 @@ void test_vlsseg2e32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_tumu( @@ -342,7 +342,7 @@ void test_vlsseg2e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_tumu( @@ -355,7 +355,7 @@ void test_vlsseg2e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_tumu( @@ -368,7 +368,7 @@ void test_vlsseg2e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_tumu( @@ -381,7 +381,7 @@ void test_vlsseg2e32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_tumu( @@ -394,7 +394,7 @@ void test_vlsseg2e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t 
maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_tumu( @@ -407,7 +407,7 @@ void test_vlsseg2e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_tumu( @@ -420,7 +420,7 @@ void test_vlsseg2e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tumu( @@ -433,7 +433,7 @@ void test_vlsseg2e32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_tumu( @@ 
-446,7 +446,7 @@ void test_vlsseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_tumu( @@ -459,7 +459,7 @@ void test_vlsseg2e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_tumu( @@ -472,7 +472,7 @@ void test_vlsseg2e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_mu( @@ -485,7 +485,7 @@ void test_vlsseg2e32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, 
size_t vl) { - return vlsseg2e32_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_mu( @@ -498,7 +498,7 @@ void test_vlsseg2e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_mu( @@ -511,7 +511,7 @@ void test_vlsseg2e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_mu( @@ -524,7 +524,7 @@ void test_vlsseg2e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_mu( @@ -537,7 +537,7 @@ void test_vlsseg2e32_v_f32m4_mu(vfloat32m4_t *v0, 
vfloat32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_mu( @@ -550,7 +550,7 @@ void test_vlsseg2e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_mu( @@ -563,7 +563,7 @@ void test_vlsseg2e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_mu( @@ -576,7 +576,7 @@ void test_vlsseg2e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return 
__riscv_vlsseg2e32_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_mu( @@ -589,7 +589,7 @@ void test_vlsseg2e32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_mu( @@ -602,7 +602,7 @@ void test_vlsseg2e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_mu( @@ -615,7 +615,7 @@ void test_vlsseg2e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_mu( @@ -628,6 +628,6 @@ void test_vlsseg2e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e32_v_u32m4_mu(vuint32m4_t *v0, 
vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e32_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e64.c index 61525d1216da..a70a4cb65f16 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e64.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_tu( @@ -30,7 +30,7 @@ void test_vlsseg2e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_tu( @@ -43,7 +43,7 @@ void test_vlsseg2e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t 
bstride, size_t vl) { - return vlsseg2e64_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_tu( @@ -56,7 +56,7 @@ void test_vlsseg2e64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_tu( @@ -69,7 +69,7 @@ void test_vlsseg2e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maske // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_tu( @@ -82,7 +82,7 @@ void test_vlsseg2e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maske // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_tu( @@ -95,7 +95,7 @@ void test_vlsseg2e64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maske // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t 
*v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_tu( @@ -108,7 +108,7 @@ void test_vlsseg2e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_tu( @@ -121,7 +121,7 @@ void test_vlsseg2e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1_tum( @@ -134,7 +134,7 @@ void test_vlsseg2e64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_tum( @@ -147,7 +147,7 @@ void 
test_vlsseg2e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_tum( @@ -160,7 +160,7 @@ void test_vlsseg2e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_tum( @@ -173,7 +173,7 @@ void test_vlsseg2e64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_tum( @@ -186,7 +186,7 @@ void test_vlsseg2e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m2_tum(v0, 
v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_tum( @@ -199,7 +199,7 @@ void test_vlsseg2e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_tum( @@ -212,7 +212,7 @@ void test_vlsseg2e64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_tum( @@ -225,7 +225,7 @@ void test_vlsseg2e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_tum( @@ -238,7 +238,7 @@ void test_vlsseg2e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: 
ret void // void test_vlsseg2e64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1_tumu( @@ -251,7 +251,7 @@ void test_vlsseg2e64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_tumu( @@ -264,7 +264,7 @@ void test_vlsseg2e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_tumu( @@ -277,7 +277,7 @@ void test_vlsseg2e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return 
__riscv_vlsseg2e64_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_tumu( @@ -290,7 +290,7 @@ void test_vlsseg2e64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_tumu( @@ -303,7 +303,7 @@ void test_vlsseg2e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_tumu( @@ -316,7 +316,7 @@ void test_vlsseg2e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_tumu( @@ -329,7 +329,7 @@ void test_vlsseg2e64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m1_tumu(vuint64m1_t 
*v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_tumu( @@ -342,7 +342,7 @@ void test_vlsseg2e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_tumu( @@ -355,7 +355,7 @@ void test_vlsseg2e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1_mu( @@ -368,7 +368,7 @@ void test_vlsseg2e64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_mu( @@ -381,7 +381,7 @@ void test_vlsseg2e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_mu( @@ -394,7 +394,7 @@ void test_vlsseg2e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_mu( @@ -407,7 +407,7 @@ void test_vlsseg2e64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_mu( @@ -420,7 +420,7 @@ void test_vlsseg2e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const 
int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_mu( @@ -433,7 +433,7 @@ void test_vlsseg2e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_mu( @@ -446,7 +446,7 @@ void test_vlsseg2e64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_mu( @@ -459,7 +459,7 @@ void test_vlsseg2e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_mu( @@ -472,6 +472,6 @@ void 
test_vlsseg2e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e64_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e64_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e8.c index 24de371a386b..41cac9f3e9b8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e8.c @@ -16,7 +16,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_tu( @@ -29,7 +29,7 @@ void test_vlsseg2e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t masked // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_tu( @@ -42,7 +42,7 @@ void test_vlsseg2e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t masked // CHECK-RV64-NEXT: ret void // void 
test_vlsseg2e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_tu( @@ -55,7 +55,7 @@ void test_vlsseg2e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t masked // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_tu( @@ -68,7 +68,7 @@ void test_vlsseg2e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0 // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_tu( @@ -81,7 +81,7 @@ void test_vlsseg2e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0 // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_tu( @@ -94,7 +94,7 @@ void test_vlsseg2e8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0 
// CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_tu( @@ -107,7 +107,7 @@ void test_vlsseg2e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_tu( @@ -120,7 +120,7 @@ void test_vlsseg2e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_tu( @@ -133,7 +133,7 @@ void test_vlsseg2e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_tu( @@ -146,7 +146,7 @@ 
void test_vlsseg2e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedo // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_tu( @@ -159,7 +159,7 @@ void test_vlsseg2e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedo // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_tum( @@ -172,7 +172,7 @@ void test_vlsseg2e8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedo // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_tum( @@ -185,7 +185,7 @@ void test_vlsseg2e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return 
__riscv_vlsseg2e8_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_tum( @@ -198,7 +198,7 @@ void test_vlsseg2e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_tum( @@ -211,7 +211,7 @@ void test_vlsseg2e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_tum( @@ -224,7 +224,7 @@ void test_vlsseg2e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_tum( @@ -237,7 +237,7 @@ void test_vlsseg2e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, 
vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_tum( @@ -250,7 +250,7 @@ void test_vlsseg2e8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_tum( @@ -263,7 +263,7 @@ void test_vlsseg2e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_tum( @@ -276,7 +276,7 @@ void test_vlsseg2e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_tum( @@ -289,7 +289,7 @@ void 
test_vlsseg2e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_tum( @@ -302,7 +302,7 @@ void test_vlsseg2e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vu // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_tum( @@ -315,7 +315,7 @@ void test_vlsseg2e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vu // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_tumu( @@ -328,7 +328,7 @@ void test_vlsseg2e8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vu // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bstride, vl); + return __riscv_vlsseg2e8_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_tumu( @@ -341,7 +341,7 @@ void test_vlsseg2e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_tumu( @@ -354,7 +354,7 @@ void test_vlsseg2e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_tumu( @@ -367,7 +367,7 @@ void test_vlsseg2e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_tumu( @@ -380,7 +380,7 @@ void test_vlsseg2e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m2_tumu(vint8m2_t *v0, 
vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_tumu( @@ -393,7 +393,7 @@ void test_vlsseg2e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_tumu( @@ -406,7 +406,7 @@ void test_vlsseg2e8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_tumu( @@ -419,7 +419,7 @@ void test_vlsseg2e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlsseg2e8_v_u8mf2_tumu( @@ -432,7 +432,7 @@ void test_vlsseg2e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_tumu( @@ -445,7 +445,7 @@ void test_vlsseg2e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_tumu( @@ -458,7 +458,7 @@ void test_vlsseg2e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_tumu( @@ -471,7 +471,7 @@ void test_vlsseg2e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - 
return vlsseg2e8_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_mu( @@ -484,7 +484,7 @@ void test_vlsseg2e8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_mu( @@ -497,7 +497,7 @@ void test_vlsseg2e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_mu( @@ -510,7 +510,7 @@ void test_vlsseg2e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, v // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_mu( @@ -523,7 +523,7 @@ void test_vlsseg2e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, v // CHECK-RV64-NEXT: ret void 
// void test_vlsseg2e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_mu( @@ -536,7 +536,7 @@ void test_vlsseg2e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_mu( @@ -549,7 +549,7 @@ void test_vlsseg2e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_mu( @@ -562,7 +562,7 @@ void test_vlsseg2e8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_mu( @@ -575,7 +575,7 @@ void test_vlsseg2e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_mu( @@ -588,7 +588,7 @@ void test_vlsseg2e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_mu( @@ -601,7 +601,7 @@ void test_vlsseg2e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_mu( @@ -614,7 +614,7 @@ void test_vlsseg2e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vui // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - 
return vlsseg2e8_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_mu( @@ -627,6 +627,6 @@ void test_vlsseg2e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vui // CHECK-RV64-NEXT: ret void // void test_vlsseg2e8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); + return __riscv_vlsseg2e8_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e16.c index c56a6bfd99c5..ccf183b883a8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e16.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_tu( @@ -34,7 +34,7 @@ void test_vlsseg3e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, 
ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_tu( @@ -49,7 +49,7 @@ void test_vlsseg3e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_tu( @@ -64,7 +64,7 @@ void test_vlsseg3e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_tu( @@ -79,7 +79,7 @@ void test_vlsseg3e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16mf4_tu(v0, v1, v2, 
maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_tu( @@ -94,7 +94,7 @@ void test_vlsseg3e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_tu( @@ -109,7 +109,7 @@ void test_vlsseg3e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_tu( @@ -124,7 +124,7 @@ void test_vlsseg3e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_tu( @@ -139,7 +139,7 @@ void test_vlsseg3e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // 
CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_tu( @@ -154,7 +154,7 @@ void test_vlsseg3e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_tu( @@ -169,7 +169,7 @@ void test_vlsseg3e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_tu( @@ -184,7 +184,7 @@ void test_vlsseg3e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t 
maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_tum( @@ -199,7 +199,7 @@ void test_vlsseg3e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_tum( @@ -214,7 +214,7 @@ void test_vlsseg3e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_tum( @@ -229,7 +229,7 @@ void test_vlsseg3e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg3e16_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_tum( @@ -244,7 +244,7 @@ void test_vlsseg3e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_tum( @@ -259,7 +259,7 @@ void test_vlsseg3e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_tum( @@ -274,7 +274,7 @@ void test_vlsseg3e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, 
vl); + return __riscv_vlsseg3e16_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_tum( @@ -289,7 +289,7 @@ void test_vlsseg3e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_tum( @@ -304,7 +304,7 @@ void test_vlsseg3e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_tum( @@ -319,7 +319,7 @@ void test_vlsseg3e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_tum( @@ -334,7 +334,7 @@ void test_vlsseg3e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_tum( @@ -349,7 +349,7 @@ void test_vlsseg3e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_tum( @@ -364,7 +364,7 @@ void test_vlsseg3e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_tumu( @@ -379,7 +379,7 @@ void 
test_vlsseg3e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_tumu( @@ -394,7 +394,7 @@ void test_vlsseg3e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_tumu( @@ -409,7 +409,7 @@ void test_vlsseg3e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_tumu( @@ -424,7 +424,7 @@ void test_vlsseg3e16_v_f16m1_tumu(vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_tumu( @@ -439,7 +439,7 @@ void test_vlsseg3e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_tumu( @@ -454,7 +454,7 @@ void test_vlsseg3e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_tumu( @@ -469,7 +469,7 @@ void test_vlsseg3e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vlsseg3e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_tumu( @@ -484,7 +484,7 @@ void test_vlsseg3e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_tumu( @@ -499,7 +499,7 @@ void test_vlsseg3e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_tumu( @@ -514,7 +514,7 @@ void test_vlsseg3e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t 
mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_tumu( @@ -529,7 +529,7 @@ void test_vlsseg3e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_tumu( @@ -544,7 +544,7 @@ void test_vlsseg3e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_mu( @@ -559,7 +559,7 @@ void test_vlsseg3e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, 
const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_mu( @@ -574,7 +574,7 @@ void test_vlsseg3e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_mu( @@ -589,7 +589,7 @@ void test_vlsseg3e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_mu( @@ -604,7 +604,7 @@ void test_vlsseg3e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg3e16_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_mu( @@ -619,7 +619,7 @@ void test_vlsseg3e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_mu( @@ -634,7 +634,7 @@ void test_vlsseg3e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_mu( @@ -649,7 +649,7 @@ void test_vlsseg3e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return 
__riscv_vlsseg3e16_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_mu( @@ -664,7 +664,7 @@ void test_vlsseg3e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_mu( @@ -679,7 +679,7 @@ void test_vlsseg3e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_mu( @@ -694,7 +694,7 @@ void test_vlsseg3e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } 
// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_mu( @@ -709,7 +709,7 @@ void test_vlsseg3e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_mu( @@ -724,6 +724,6 @@ void test_vlsseg3e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e16_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e32.c index 69e86c4e71f5..f91e26bb4413 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e32.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32mf2_tu(v0, v1, v2, 
maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_tu( @@ -34,7 +34,7 @@ void test_vlsseg3e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_tu( @@ -49,7 +49,7 @@ void test_vlsseg3e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_tu( @@ -64,7 +64,7 @@ void test_vlsseg3e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlsseg3e32_v_i32m1_tu( @@ -79,7 +79,7 @@ void test_vlsseg3e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_tu( @@ -94,7 +94,7 @@ void test_vlsseg3e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_tu( @@ -109,7 +109,7 @@ void test_vlsseg3e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_tu( @@ -124,7 +124,7 @@ void test_vlsseg3e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32m1_tu(vuint32m1_t 
*v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_tu( @@ -139,7 +139,7 @@ void test_vlsseg3e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_tum( @@ -154,7 +154,7 @@ void test_vlsseg3e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_tum( @@ -169,7 +169,7 @@ void test_vlsseg3e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, 
ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_tum( @@ -184,7 +184,7 @@ void test_vlsseg3e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_tum( @@ -199,7 +199,7 @@ void test_vlsseg3e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_tum( @@ -214,7 +214,7 @@ void test_vlsseg3e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_tum( @@ -229,7 +229,7 @@ void test_vlsseg3e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_tum( @@ -244,7 +244,7 @@ void test_vlsseg3e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_tum( @@ -259,7 +259,7 @@ void test_vlsseg3e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32m1_tum(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_tum( @@ -274,7 +274,7 @@ void test_vlsseg3e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_tumu( @@ -289,7 +289,7 @@ void test_vlsseg3e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_tumu( @@ -304,7 +304,7 @@ void test_vlsseg3e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlsseg3e32_v_f32m2_tumu( @@ -319,7 +319,7 @@ void test_vlsseg3e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_tumu( @@ -334,7 +334,7 @@ void test_vlsseg3e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_tumu( @@ -349,7 +349,7 @@ void test_vlsseg3e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_tumu( @@ -364,7 +364,7 @@ void 
test_vlsseg3e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_tumu( @@ -379,7 +379,7 @@ void test_vlsseg3e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_tumu( @@ -394,7 +394,7 @@ void test_vlsseg3e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_tumu( @@ -409,7 +409,7 @@ void test_vlsseg3e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // 
CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_mu( @@ -424,7 +424,7 @@ void test_vlsseg3e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_mu( @@ -439,7 +439,7 @@ void test_vlsseg3e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_mu( @@ -454,7 +454,7 @@ void test_vlsseg3e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t 
*v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_mu( @@ -469,7 +469,7 @@ void test_vlsseg3e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_mu( @@ -484,7 +484,7 @@ void test_vlsseg3e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_mu( @@ -499,7 +499,7 @@ void test_vlsseg3e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t 
*base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_mu( @@ -514,7 +514,7 @@ void test_vlsseg3e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_mu( @@ -529,7 +529,7 @@ void test_vlsseg3e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_mu( @@ -544,6 +544,6 @@ void test_vlsseg3e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e32_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e64.c index b2518295fcd2..0b770c815aba 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e64.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_tu( @@ -34,7 +34,7 @@ void test_vlsseg3e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_tu( @@ -49,7 +49,7 @@ void test_vlsseg3e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t 
*base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_tu( @@ -64,7 +64,7 @@ void test_vlsseg3e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_tu( @@ -79,7 +79,7 @@ void test_vlsseg3e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_tu( @@ -94,7 +94,7 @@ void test_vlsseg3e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, 
maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1_tum( @@ -109,7 +109,7 @@ void test_vlsseg3e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_tum( @@ -124,7 +124,7 @@ void test_vlsseg3e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_tum( @@ -139,7 +139,7 @@ void test_vlsseg3e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_tum( @@ -154,7 +154,7 
@@ void test_vlsseg3e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_tum( @@ -169,7 +169,7 @@ void test_vlsseg3e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_tum( @@ -184,7 +184,7 @@ void test_vlsseg3e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1_tumu( @@ -199,7 +199,7 @@ void test_vlsseg3e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV64-NEXT: ret 
void // void test_vlsseg3e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_tumu( @@ -214,7 +214,7 @@ void test_vlsseg3e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_tumu( @@ -229,7 +229,7 @@ void test_vlsseg3e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_tumu( @@ -244,7 +244,7 @@ void test_vlsseg3e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, 
vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_tumu( @@ -259,7 +259,7 @@ void test_vlsseg3e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_tumu( @@ -274,7 +274,7 @@ void test_vlsseg3e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1_mu( @@ -289,7 +289,7 @@ void test_vlsseg3e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const 
double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_mu( @@ -304,7 +304,7 @@ void test_vlsseg3e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_mu( @@ -319,7 +319,7 @@ void test_vlsseg3e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_mu( @@ -334,7 +334,7 @@ void test_vlsseg3e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_mu( @@ -349,7 +349,7 @@ void test_vlsseg3e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_mu( @@ -364,6 +364,6 @@ void test_vlsseg3e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e64_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e8.c index 86b994919ffd..16ec0ccd8ee0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e8.c @@ -18,7 +18,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, 
vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_tu( @@ -33,7 +33,7 @@ void test_vlsseg3e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_tu( @@ -48,7 +48,7 @@ void test_vlsseg3e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_tu( @@ -63,7 +63,7 @@ void test_vlsseg3e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, 
maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_tu( @@ -78,7 +78,7 @@ void test_vlsseg3e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_tu( @@ -93,7 +93,7 @@ void test_vlsseg3e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_tu( @@ -108,7 +108,7 @@ void test_vlsseg3e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_tu( @@ -123,7 +123,7 @@ void test_vlsseg3e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vlsseg3e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_tu( @@ -138,7 +138,7 @@ void test_vlsseg3e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_tu( @@ -153,7 +153,7 @@ void test_vlsseg3e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_tum( @@ -168,7 +168,7 @@ void test_vlsseg3e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg3e8_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_tum( @@ -183,7 +183,7 @@ void test_vlsseg3e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_tum( @@ -198,7 +198,7 @@ void test_vlsseg3e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_tum( @@ -213,7 +213,7 @@ void test_vlsseg3e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8m1_tum(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_tum( @@ -228,7 +228,7 @@ void test_vlsseg3e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vboo // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_tum( @@ -243,7 +243,7 @@ void test_vlsseg3e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vboo // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_tum( @@ -258,7 +258,7 @@ void test_vlsseg3e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_tum( @@ -273,7 +273,7 @@ void 
test_vlsseg3e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_tum( @@ -288,7 +288,7 @@ void test_vlsseg3e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_tum( @@ -303,7 +303,7 @@ void test_vlsseg3e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_tumu( @@ -318,7 +318,7 @@ void test_vlsseg3e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vlsseg3e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_tumu( @@ -333,7 +333,7 @@ void test_vlsseg3e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_tumu( @@ -348,7 +348,7 @@ void test_vlsseg3e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_tumu( @@ -363,7 +363,7 @@ void test_vlsseg3e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t 
maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_tumu( @@ -378,7 +378,7 @@ void test_vlsseg3e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_tumu( @@ -393,7 +393,7 @@ void test_vlsseg3e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_tumu( @@ -408,7 +408,7 @@ void test_vlsseg3e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf4_tumu(v0, v1, 
v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_tumu( @@ -423,7 +423,7 @@ void test_vlsseg3e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_tumu( @@ -438,7 +438,7 @@ void test_vlsseg3e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_tumu( @@ -453,7 +453,7 @@ void test_vlsseg3e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8m2_tumu(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_mu( @@ -468,7 +468,7 @@ void test_vlsseg3e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_mu( @@ -483,7 +483,7 @@ void test_vlsseg3e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_mu( @@ -498,7 +498,7 @@ void test_vlsseg3e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_mu( @@ -513,7 +513,7 @@ void 
test_vlsseg3e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_mu( @@ -528,7 +528,7 @@ void test_vlsseg3e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_mu( @@ -543,7 +543,7 @@ void test_vlsseg3e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_mu( @@ -558,7 +558,7 @@ void test_vlsseg3e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf4_mu(vuint8mf4_t *v0, 
vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_mu( @@ -573,7 +573,7 @@ void test_vlsseg3e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_mu( @@ -588,7 +588,7 @@ void test_vlsseg3e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_mu( @@ -603,6 +603,6 @@ void test_vlsseg3e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vlsseg3e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, 
ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); + return __riscv_vlsseg3e8_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e16.c index 8850b892bc96..04b0460528e5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e16.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_tu( @@ -38,7 +38,7 @@ void test_vlsseg4e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_tu( @@ -55,7 +55,7 @@ void 
test_vlsseg4e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_tu( @@ -72,7 +72,7 @@ void test_vlsseg4e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_tu( @@ -89,7 +89,7 @@ void test_vlsseg4e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlsseg4e16_v_i16mf2_tu( @@ -106,7 +106,7 @@ void test_vlsseg4e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_tu( @@ -123,7 +123,7 @@ void test_vlsseg4e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_tu( @@ -140,7 +140,7 @@ void test_vlsseg4e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_tu( @@ -157,7 +157,7 @@ void test_vlsseg4e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_tu( @@ -174,7 +174,7 @@ void test_vlsseg4e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_tu( @@ -191,7 +191,7 @@ void test_vlsseg4e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_tu( @@ -208,7 +208,7 @@ void test_vlsseg4e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_tum( @@ -225,7 +225,7 @@ void test_vlsseg4e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_tum( @@ -242,7 +242,7 @@ void test_vlsseg4e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_tum( @@ -259,7 +259,7 @@ void test_vlsseg4e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_tum( @@ -276,7 +276,7 @@ void test_vlsseg4e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_tum( @@ -293,7 +293,7 @@ void test_vlsseg4e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t 
maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_tum( @@ -310,7 +310,7 @@ void test_vlsseg4e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_tum( @@ -327,7 +327,7 @@ void test_vlsseg4e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_tum( @@ -344,7 +344,7 @@ void test_vlsseg4e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t 
*v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_tum( @@ -361,7 +361,7 @@ void test_vlsseg4e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_tum( @@ -378,7 +378,7 @@ void test_vlsseg4e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_tum( @@ -395,7 +395,7 @@ void test_vlsseg4e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, 
vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_tum( @@ -412,7 +412,7 @@ void test_vlsseg4e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_tumu( @@ -429,7 +429,7 @@ void test_vlsseg4e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_tumu( @@ -446,7 +446,7 @@ void test_vlsseg4e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_tumu( @@ -463,7 +463,7 @@ void test_vlsseg4e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_tumu( @@ -480,7 +480,7 @@ void test_vlsseg4e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_tumu( @@ -497,7 +497,7 @@ void test_vlsseg4e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_tumu( @@ -514,7 +514,7 @@ void test_vlsseg4e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_tumu( @@ -531,7 +531,7 @@ void test_vlsseg4e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t 
*base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_tumu( @@ -548,7 +548,7 @@ void test_vlsseg4e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_tumu( @@ -565,7 +565,7 @@ void test_vlsseg4e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_tumu( @@ -582,7 +582,7 @@ void test_vlsseg4e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, 
vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_tumu( @@ -599,7 +599,7 @@ void test_vlsseg4e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_tumu( @@ -616,7 +616,7 @@ void test_vlsseg4e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_mu( @@ -633,7 +633,7 @@ void test_vlsseg4e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t 
*v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_mu( @@ -650,7 +650,7 @@ void test_vlsseg4e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_mu( @@ -667,7 +667,7 @@ void test_vlsseg4e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_mu( @@ -684,7 +684,7 @@ void test_vlsseg4e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_mu( @@ -701,7 +701,7 @@ void test_vlsseg4e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_mu( @@ -718,7 +718,7 @@ void test_vlsseg4e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); 
+ return __riscv_vlsseg4e16_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_mu( @@ -735,7 +735,7 @@ void test_vlsseg4e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_mu( @@ -752,7 +752,7 @@ void test_vlsseg4e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_mu( @@ -769,7 +769,7 @@ void test_vlsseg4e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg4e16_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_mu( @@ -786,7 +786,7 @@ void test_vlsseg4e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_mu( @@ -803,7 +803,7 @@ void test_vlsseg4e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_mu( @@ -820,6 +820,6 @@ void test_vlsseg4e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t 
maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e16_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e32.c index 7b6b7d89f456..4ecf26f609db 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e32.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_tu( @@ -38,7 +38,7 @@ void test_vlsseg4e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_tu( @@ -55,7 +55,7 @@ void test_vlsseg4e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_tu( @@ -72,7 +72,7 @@ void test_vlsseg4e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_tu( @@ -89,7 +89,7 @@ void test_vlsseg4e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_tu( @@ -106,7 +106,7 @@ void test_vlsseg4e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_tu( @@ -123,7 +123,7 @@ void test_vlsseg4e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_tu( @@ -140,7 +140,7 @@ void test_vlsseg4e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_tu( @@ -157,7 +157,7 @@ void test_vlsseg4e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_tum( @@ -174,7 +174,7 @@ void test_vlsseg4e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_tum( @@ -191,7 +191,7 @@ void test_vlsseg4e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_tum( @@ -208,7 +208,7 @@ void test_vlsseg4e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_tum( @@ -225,7 +225,7 @@ void test_vlsseg4e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_tum( @@ -242,7 +242,7 @@ void test_vlsseg4e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, 
ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_tum( @@ -259,7 +259,7 @@ void test_vlsseg4e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_tum( @@ -276,7 +276,7 @@ void test_vlsseg4e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_tum( @@ -293,7 +293,7 @@ void test_vlsseg4e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, 
vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_tum( @@ -310,7 +310,7 @@ void test_vlsseg4e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_tumu( @@ -327,7 +327,7 @@ void test_vlsseg4e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_tumu( @@ -344,7 +344,7 @@ void test_vlsseg4e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // 
CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_tumu( @@ -361,7 +361,7 @@ void test_vlsseg4e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_tumu( @@ -378,7 +378,7 @@ void test_vlsseg4e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_tumu( @@ -395,7 +395,7 @@ void test_vlsseg4e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_tumu( @@ -412,7 +412,7 @@ void test_vlsseg4e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_tumu( @@ -429,7 +429,7 @@ void test_vlsseg4e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return 
__riscv_vlsseg4e32_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_tumu( @@ -446,7 +446,7 @@ void test_vlsseg4e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_tumu( @@ -463,7 +463,7 @@ void test_vlsseg4e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_mu( @@ -480,7 +480,7 @@ void test_vlsseg4e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t 
vl) { - return vlsseg4e32_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_mu( @@ -497,7 +497,7 @@ void test_vlsseg4e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_mu( @@ -514,7 +514,7 @@ void test_vlsseg4e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_mu( @@ -531,7 +531,7 @@ void test_vlsseg4e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, 
vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_mu( @@ -548,7 +548,7 @@ void test_vlsseg4e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_mu( @@ -565,7 +565,7 @@ void test_vlsseg4e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_mu( @@ -582,7 +582,7 @@ void test_vlsseg4e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32mf2_mu(vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_mu( @@ -599,7 +599,7 @@ void test_vlsseg4e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_mu( @@ -616,6 +616,6 @@ void test_vlsseg4e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e32_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e64.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e64.c index 3e6044b085cf..f38c80fccb54 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e64.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_tu( @@ -38,7 +38,7 @@ void test_vlsseg4e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_tu( @@ -55,7 +55,7 @@ void test_vlsseg4e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg4e64_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_tu( @@ -72,7 +72,7 @@ void test_vlsseg4e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_tu( @@ -89,7 +89,7 @@ void test_vlsseg4e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_tu( @@ -106,7 +106,7 @@ void test_vlsseg4e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - 
return vlsseg4e64_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1_tum( @@ -123,7 +123,7 @@ void test_vlsseg4e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_tum( @@ -140,7 +140,7 @@ void test_vlsseg4e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_tum( @@ -157,7 +157,7 @@ void test_vlsseg4e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t 
maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_tum( @@ -174,7 +174,7 @@ void test_vlsseg4e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_tum( @@ -191,7 +191,7 @@ void test_vlsseg4e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_tum( @@ -208,7 +208,7 @@ void test_vlsseg4e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_u64m2_tum(vuint64m2_t *v0, 
vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1_tumu( @@ -225,7 +225,7 @@ void test_vlsseg4e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_tumu( @@ -242,7 +242,7 @@ void test_vlsseg4e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_tumu( @@ -259,7 +259,7 @@ void 
test_vlsseg4e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_tumu( @@ -276,7 +276,7 @@ void test_vlsseg4e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_tumu( @@ -293,7 +293,7 @@ void test_vlsseg4e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_tumu( @@ -310,7 +310,7 @@ void test_vlsseg4e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1_mu( @@ -327,7 +327,7 @@ void test_vlsseg4e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_mu( @@ -344,7 +344,7 @@ void test_vlsseg4e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_mu( @@ -361,7 +361,7 @@ void test_vlsseg4e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_mu( @@ -378,7 +378,7 @@ void test_vlsseg4e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_mu( @@ -395,7 +395,7 @@ void test_vlsseg4e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - 
return vlsseg4e64_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_mu( @@ -412,6 +412,6 @@ void test_vlsseg4e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e64_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e8.c index d7b3c0e9be91..d86dbec0f729 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e8.c @@ -20,7 +20,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_tu( @@ -37,7 +37,7 @@ void 
test_vlsseg4e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_tu( @@ -54,7 +54,7 @@ void test_vlsseg4e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_tu( @@ -71,7 +71,7 @@ void test_vlsseg4e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_tu( @@ -88,7 +88,7 @@ void test_vlsseg4e8_v_i8m1_tu(vint8m1_t *v0, 
vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_tu( @@ -105,7 +105,7 @@ void test_vlsseg4e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_tu( @@ -122,7 +122,7 @@ void test_vlsseg4e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_tu( @@ -139,7 +139,7 @@ void test_vlsseg4e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t 
*v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_tu( @@ -156,7 +156,7 @@ void test_vlsseg4e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_tu( @@ -173,7 +173,7 @@ void test_vlsseg4e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_tum( @@ -190,7 +190,7 @@ void test_vlsseg4e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, 
vu // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_tum( @@ -207,7 +207,7 @@ void test_vlsseg4e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_tum( @@ -224,7 +224,7 @@ void test_vlsseg4e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_tum( @@ -241,7 +241,7 @@ void 
test_vlsseg4e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_tum( @@ -258,7 +258,7 @@ void test_vlsseg4e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_tum( @@ -275,7 +275,7 @@ void test_vlsseg4e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_tum( @@ -292,7 +292,7 @@ void test_vlsseg4e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_tum( @@ -309,7 +309,7 @@ void test_vlsseg4e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_tum( @@ -326,7 +326,7 @@ void test_vlsseg4e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8m1_tum(v0, v1, 
v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_tum( @@ -343,7 +343,7 @@ void test_vlsseg4e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_tumu( @@ -360,7 +360,7 @@ void test_vlsseg4e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_tumu( @@ -377,7 +377,7 @@ void test_vlsseg4e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_tumu( @@ -394,7 +394,7 @@ void test_vlsseg4e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_tumu( @@ -411,7 +411,7 @@ void test_vlsseg4e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_tumu( @@ -428,7 +428,7 @@ void test_vlsseg4e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg4e8_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_tumu( @@ -445,7 +445,7 @@ void test_vlsseg4e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_tumu( @@ -462,7 +462,7 @@ void test_vlsseg4e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_tumu( @@ -479,7 +479,7 @@ void test_vlsseg4e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, 
vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_tumu( @@ -496,7 +496,7 @@ void test_vlsseg4e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_tumu( @@ -513,7 +513,7 @@ void test_vlsseg4e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_mu( @@ -530,7 +530,7 @@ void test_vlsseg4e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, 
vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_mu( @@ -547,7 +547,7 @@ void test_vlsseg4e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_mu( @@ -564,7 +564,7 @@ void test_vlsseg4e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_mu( @@ -581,7 +581,7 @@ void test_vlsseg4e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vlsseg4e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_mu( @@ -598,7 +598,7 @@ void test_vlsseg4e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_mu( @@ -615,7 +615,7 @@ void test_vlsseg4e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_mu( @@ -632,7 +632,7 @@ void test_vlsseg4e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, 
vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_mu( @@ -649,7 +649,7 @@ void test_vlsseg4e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_mu( @@ -666,7 +666,7 @@ void test_vlsseg4e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_mu( @@ -683,6 
+683,6 @@ void test_vlsseg4e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlsseg4e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); + return __riscv_vlsseg4e8_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e16.c index ea12fbeb72e2..c55dc2d03449 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e16.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_tu( @@ -42,7 +42,7 @@ void test_vlsseg5e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, 
vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_tu( @@ -61,7 +61,7 @@ void test_vlsseg5e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_tu( @@ -80,7 +80,7 @@ void test_vlsseg5e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlsseg5e16_v_i16mf2_tu( @@ -99,7 +99,7 @@ void test_vlsseg5e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_tu( @@ -118,7 +118,7 @@ void test_vlsseg5e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_tu( @@ -137,7 +137,7 @@ void test_vlsseg5e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf4_tu(v0, v1, v2, v3, v4, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_tu( @@ -156,7 +156,7 @@ void test_vlsseg5e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_tu( @@ -175,7 +175,7 @@ void test_vlsseg5e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_tum( @@ -194,7 +194,7 @@ void test_vlsseg5e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t 
*v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_tum( @@ -213,7 +213,7 @@ void test_vlsseg5e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_tum( @@ -232,7 +232,7 @@ void test_vlsseg5e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return 
__riscv_vlsseg5e16_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_tum( @@ -251,7 +251,7 @@ void test_vlsseg5e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_tum( @@ -270,7 +270,7 @@ void test_vlsseg5e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_tum( @@ -289,7 +289,7 @@ void test_vlsseg5e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, 
vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_tum( @@ -308,7 +308,7 @@ void test_vlsseg5e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_tum( @@ -327,7 +327,7 @@ void test_vlsseg5e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_tum( @@ -346,7 +346,7 @@ void test_vlsseg5e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_tumu( @@ -365,7 +365,7 @@ void test_vlsseg5e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_tumu( @@ -384,7 +384,7 @@ void test_vlsseg5e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, 
vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_tumu( @@ -403,7 +403,7 @@ void test_vlsseg5e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_tumu( @@ -422,7 +422,7 @@ void test_vlsseg5e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_tumu( @@ -441,7 +441,7 @@ void test_vlsseg5e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_tumu( @@ -460,7 +460,7 @@ void test_vlsseg5e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_tumu( @@ -479,7 +479,7 @@ void test_vlsseg5e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, 
vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_tumu( @@ -498,7 +498,7 @@ void test_vlsseg5e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_tumu( @@ -517,7 +517,7 @@ void test_vlsseg5e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlsseg5e16_v_f16mf4_mu( @@ -536,7 +536,7 @@ void test_vlsseg5e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_mu( @@ -555,7 +555,7 @@ void test_vlsseg5e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_mu( @@ -574,7 +574,7 @@ void test_vlsseg5e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, 
vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_mu( @@ -593,7 +593,7 @@ void test_vlsseg5e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_mu( @@ -612,7 +612,7 @@ void test_vlsseg5e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_mu( @@ -631,7 +631,7 @@ void 
test_vlsseg5e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_mu( @@ -650,7 +650,7 @@ void test_vlsseg5e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_mu( @@ -669,7 +669,7 @@ void test_vlsseg5e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg5e16_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_mu( @@ -688,6 +688,6 @@ void test_vlsseg5e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e16_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e32.c index 95eb79f10e9f..dc6bf8a72013 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e32.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return 
__riscv_vlsseg5e32_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_tu( @@ -42,7 +42,7 @@ void test_vlsseg5e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_tu( @@ -61,7 +61,7 @@ void test_vlsseg5e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_tu( @@ -80,7 +80,7 @@ void test_vlsseg5e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, 
vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_tu( @@ -99,7 +99,7 @@ void test_vlsseg5e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_tu( @@ -118,7 +118,7 @@ void test_vlsseg5e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_tum( @@ -137,7 +137,7 @@ void test_vlsseg5e32_v_u32m1_tu(vuint32m1_t *v0, 
vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_tum( @@ -156,7 +156,7 @@ void test_vlsseg5e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_tum( @@ -175,7 +175,7 @@ void test_vlsseg5e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32mf2_tum(v0, v1, 
v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_tum( @@ -194,7 +194,7 @@ void test_vlsseg5e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_tum( @@ -213,7 +213,7 @@ void test_vlsseg5e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_tum( @@ -232,7 +232,7 @@ void test_vlsseg5e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void 
test_vlsseg5e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_tumu( @@ -251,7 +251,7 @@ void test_vlsseg5e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_tumu( @@ -270,7 +270,7 @@ void test_vlsseg5e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_tumu( @@ -289,7 +289,7 @@ void test_vlsseg5e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_tumu( @@ -308,7 +308,7 @@ void test_vlsseg5e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_tumu( @@ -327,7 +327,7 @@ void test_vlsseg5e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, 
vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_tumu( @@ -346,7 +346,7 @@ void test_vlsseg5e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_mu( @@ -365,7 +365,7 @@ void test_vlsseg5e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return 
__riscv_vlsseg5e32_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_mu( @@ -384,7 +384,7 @@ void test_vlsseg5e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_mu( @@ -403,7 +403,7 @@ void test_vlsseg5e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_mu( @@ -422,7 +422,7 @@ void test_vlsseg5e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, 
vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_mu( @@ -441,7 +441,7 @@ void test_vlsseg5e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_mu( @@ -460,6 +460,6 @@ void test_vlsseg5e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e32_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e64.c index 168345bde751..7427377e2be0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e64.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e64_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_tu( @@ -42,7 +42,7 @@ void test_vlsseg5e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e64_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_tu( @@ -61,7 +61,7 @@ void test_vlsseg5e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vlsseg5e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e64_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1_tum( @@ -80,7 +80,7 @@ void test_vlsseg5e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e64_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_tum( @@ -99,7 +99,7 @@ void test_vlsseg5e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return 
__riscv_vlsseg5e64_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_tum( @@ -118,7 +118,7 @@ void test_vlsseg5e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e64_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1_tumu( @@ -137,7 +137,7 @@ void test_vlsseg5e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e64_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_tumu( @@ -156,7 +156,7 @@ void test_vlsseg5e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, 
vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e64_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_tumu( @@ -175,7 +175,7 @@ void test_vlsseg5e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e64_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1_mu( @@ -194,7 +194,7 @@ void test_vlsseg5e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e64_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_mu( @@ -213,7 +213,7 @@ void test_vlsseg5e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e64_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_mu( @@ -232,6 +232,6 @@ void test_vlsseg5e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e64_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e8.c index d73b2466f3d5..c0d196bc4509 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e8.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e8.c @@ -22,7 +22,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_tu( @@ -41,7 +41,7 @@ void test_vlsseg5e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_tu( @@ -60,7 +60,7 @@ void test_vlsseg5e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, 
vl); + return __riscv_vlsseg5e8_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_tu( @@ -79,7 +79,7 @@ void test_vlsseg5e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_tu( @@ -98,7 +98,7 @@ void test_vlsseg5e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_tu( @@ -117,7 +117,7 @@ void test_vlsseg5e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, 
vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_tu( @@ -136,7 +136,7 @@ void test_vlsseg5e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_tu( @@ -155,7 +155,7 @@ void test_vlsseg5e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_tum( @@ -174,7 +174,7 @@ void test_vlsseg5e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // 
void test_vlsseg5e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_tum( @@ -193,7 +193,7 @@ void test_vlsseg5e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_tum( @@ -212,7 +212,7 @@ void test_vlsseg5e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf2_tum(v0, 
v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_tum( @@ -231,7 +231,7 @@ void test_vlsseg5e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_tum( @@ -250,7 +250,7 @@ void test_vlsseg5e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_tum( @@ -269,7 +269,7 @@ void test_vlsseg5e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t 
maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_tum( @@ -288,7 +288,7 @@ void test_vlsseg5e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_tum( @@ -307,7 +307,7 @@ void test_vlsseg5e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_tumu( @@ -326,7 +326,7 @@ 
void test_vlsseg5e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_tumu( @@ -345,7 +345,7 @@ void test_vlsseg5e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_tumu( @@ -364,7 +364,7 @@ void test_vlsseg5e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_tumu( @@ -383,7 +383,7 @@ void test_vlsseg5e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_tumu( @@ -402,7 +402,7 @@ void test_vlsseg5e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_tumu( @@ -421,7 +421,7 @@ void test_vlsseg5e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf4_tumu(vuint8mf4_t *v0, 
vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_tumu( @@ -440,7 +440,7 @@ void test_vlsseg5e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_tumu( @@ -459,7 +459,7 @@ void test_vlsseg5e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_mu( @@ -478,7 +478,7 @@ void test_vlsseg5e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_mu( @@ -497,7 +497,7 @@ void test_vlsseg5e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_mu( @@ -516,7 +516,7 @@ void test_vlsseg5e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, 
vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_mu( @@ -535,7 +535,7 @@ void test_vlsseg5e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_mu( @@ -554,7 +554,7 @@ void test_vlsseg5e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_mu( @@ -573,7 +573,7 @@ void test_vlsseg5e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t 
*v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_mu( @@ -592,7 +592,7 @@ void test_vlsseg5e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); + return __riscv_vlsseg5e8_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_mu( @@ -611,6 +611,6 @@ void test_vlsseg5e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg5e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
base, bstride, vl); + return __riscv_vlsseg5e8_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c index c74f3ee0f90b..b242586ce104 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_tu( @@ -46,7 +46,7 @@ void test_vlsseg6e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return 
__riscv_vlsseg6e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_tu( @@ -67,7 +67,7 @@ void test_vlsseg6e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_tu( @@ -88,7 +88,7 @@ void test_vlsseg6e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_tu( @@ -109,7 +109,7 @@ void test_vlsseg6e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void 
test_vlsseg6e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_tu( @@ -130,7 +130,7 @@ void test_vlsseg6e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_tu( @@ -151,7 +151,7 @@ void test_vlsseg6e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg6e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_tu( @@ -172,7 +172,7 @@ void test_vlsseg6e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_tu( @@ -193,7 +193,7 @@ void test_vlsseg6e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlsseg6e16_v_f16mf4_tum( @@ -214,7 +214,7 @@ void test_vlsseg6e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_tum( @@ -235,7 +235,7 @@ void test_vlsseg6e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_tum( @@ -256,7 +256,7 @@ void test_vlsseg6e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, 
vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_tum( @@ -277,7 +277,7 @@ void test_vlsseg6e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_tum( @@ -298,7 +298,7 @@ void test_vlsseg6e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - 
return vlsseg6e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_tum( @@ -319,7 +319,7 @@ void test_vlsseg6e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_tum( @@ -340,7 +340,7 @@ void test_vlsseg6e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_tum( @@ -361,7 +361,7 @@ void test_vlsseg6e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_tum( @@ -382,7 +382,7 @@ void test_vlsseg6e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_tumu( @@ -403,7 +403,7 @@ void test_vlsseg6e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, 
vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_tumu( @@ -424,7 +424,7 @@ void test_vlsseg6e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_tumu( @@ -445,7 +445,7 @@ void test_vlsseg6e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, 
const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_tumu( @@ -466,7 +466,7 @@ void test_vlsseg6e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_tumu( @@ -487,7 +487,7 @@ void test_vlsseg6e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_tumu( @@ -508,7 +508,7 @@ void test_vlsseg6e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_tumu( @@ -529,7 +529,7 @@ void test_vlsseg6e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_tumu( @@ -550,7 +550,7 @@ void test_vlsseg6e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void 
test_vlsseg6e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_tumu( @@ -571,7 +571,7 @@ void test_vlsseg6e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_mu( @@ -592,7 +592,7 @@ void test_vlsseg6e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t 
maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_mu( @@ -613,7 +613,7 @@ void test_vlsseg6e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_mu( @@ -634,7 +634,7 @@ void test_vlsseg6e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return 
__riscv_vlsseg6e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_mu( @@ -655,7 +655,7 @@ void test_vlsseg6e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_mu( @@ -676,7 +676,7 @@ void test_vlsseg6e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_mu( @@ -697,7 +697,7 @@ void test_vlsseg6e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, 
vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_mu( @@ -718,7 +718,7 @@ void test_vlsseg6e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_mu( @@ -739,7 +739,7 @@ void test_vlsseg6e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, 
vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_mu( @@ -760,6 +760,6 @@ void test_vlsseg6e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e32.c index 3c912ad87ca6..01c2478f5150 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e32.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, 
vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_tu( @@ -46,7 +46,7 @@ void test_vlsseg6e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_tu( @@ -67,7 +67,7 @@ void test_vlsseg6e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_tu( @@ -88,7 +88,7 @@ void test_vlsseg6e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_tu( @@ -109,7 +109,7 @@ void test_vlsseg6e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_tu( @@ -130,7 +130,7 @@ void test_vlsseg6e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, 
vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_tum( @@ -151,7 +151,7 @@ void test_vlsseg6e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_tum( @@ -172,7 +172,7 @@ void test_vlsseg6e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_tum( @@ -193,7 +193,7 @@ void test_vlsseg6e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_tum( @@ -214,7 +214,7 @@ void test_vlsseg6e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_tum( 
@@ -235,7 +235,7 @@ void test_vlsseg6e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_tum( @@ -256,7 +256,7 @@ void test_vlsseg6e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_tumu( @@ -277,7 +277,7 @@ void test_vlsseg6e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t 
*v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_tumu( @@ -298,7 +298,7 @@ void test_vlsseg6e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_tumu( @@ -319,7 +319,7 @@ void test_vlsseg6e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32mf2_tumu(v0, v1, v2, v3, 
v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_tumu( @@ -340,7 +340,7 @@ void test_vlsseg6e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_tumu( @@ -361,7 +361,7 @@ void test_vlsseg6e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_tumu( @@ -382,7 +382,7 @@ void test_vlsseg6e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_mu( @@ -403,7 +403,7 @@ void test_vlsseg6e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_mu( @@ -424,7 +424,7 @@ void test_vlsseg6e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, 
vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_mu( @@ -445,7 +445,7 @@ void test_vlsseg6e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_mu( @@ -466,7 +466,7 @@ void test_vlsseg6e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32m1_mu(v0, v1, v2, 
v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_mu( @@ -487,7 +487,7 @@ void test_vlsseg6e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_mu( @@ -508,6 +508,6 @@ void test_vlsseg6e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e64.c index 555b0f44896a..8994f2e7e795 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e64.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_tu( @@ -46,7 +46,7 @@ void test_vlsseg6e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_tu( @@ -67,7 +67,7 @@ void 
test_vlsseg6e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1_tum( @@ -88,7 +88,7 @@ void test_vlsseg6e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_tum( @@ -109,7 +109,7 @@ void test_vlsseg6e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, 
vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_tum( @@ -130,7 +130,7 @@ void test_vlsseg6e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1_tumu( @@ -151,7 +151,7 @@ void test_vlsseg6e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return 
__riscv_vlsseg6e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_tumu( @@ -172,7 +172,7 @@ void test_vlsseg6e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_tumu( @@ -193,7 +193,7 @@ void test_vlsseg6e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1_mu( @@ -214,7 +214,7 @@ void test_vlsseg6e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, 
vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_mu( @@ -235,7 +235,7 @@ void test_vlsseg6e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_mu( @@ -256,6 +256,6 @@ void test_vlsseg6e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t 
maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e8.c index ba249221ef90..fba3545bac52 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e8.c @@ -24,7 +24,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_tu( @@ -45,7 +45,7 @@ void test_vlsseg6e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg6e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_tu( @@ -66,7 +66,7 @@ void test_vlsseg6e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_tu( @@ -87,7 +87,7 @@ void test_vlsseg6e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_tu( @@ -108,7 +108,7 @@ void test_vlsseg6e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t 
*v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_tu( @@ -129,7 +129,7 @@ void test_vlsseg6e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_tu( @@ -150,7 +150,7 @@ void test_vlsseg6e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t 
bstride, size_t vl) { - return vlsseg6e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_tu( @@ -171,7 +171,7 @@ void test_vlsseg6e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_tum( @@ -192,7 +192,7 @@ void test_vlsseg6e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlsseg6e8_v_i8mf4_tum( @@ -213,7 +213,7 @@ void test_vlsseg6e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_tum( @@ -234,7 +234,7 @@ void test_vlsseg6e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_tum( @@ -255,7 +255,7 @@ void test_vlsseg6e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t 
maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_tum( @@ -276,7 +276,7 @@ void test_vlsseg6e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_tum( @@ -297,7 +297,7 @@ void test_vlsseg6e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + 
return __riscv_vlsseg6e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_tum( @@ -318,7 +318,7 @@ void test_vlsseg6e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_tum( @@ -339,7 +339,7 @@ void test_vlsseg6e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_tumu( @@ -360,7 +360,7 @@ void test_vlsseg6e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // 
CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_tumu( @@ -381,7 +381,7 @@ void test_vlsseg6e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_tumu( @@ -402,7 +402,7 @@ void test_vlsseg6e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t 
*base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_tumu( @@ -423,7 +423,7 @@ void test_vlsseg6e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_tumu( @@ -444,7 +444,7 @@ void test_vlsseg6e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_tumu( @@ -465,7 +465,7 @@ void test_vlsseg6e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_tumu( @@ -486,7 +486,7 @@ void test_vlsseg6e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_tumu( @@ -507,7 +507,7 @@ void test_vlsseg6e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, 
vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_mu( @@ -528,7 +528,7 @@ void test_vlsseg6e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_mu( @@ -549,7 +549,7 @@ void test_vlsseg6e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_mu( @@ -570,7 +570,7 @@ void test_vlsseg6e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_mu( @@ -591,7 +591,7 @@ void test_vlsseg6e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_mu( @@ -612,7 +612,7 @@ void test_vlsseg6e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, 
vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_mu( @@ -633,7 +633,7 @@ void test_vlsseg6e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_mu( @@ -654,7 +654,7 @@ void test_vlsseg6e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t 
maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_mu( @@ -675,6 +675,6 @@ void test_vlsseg6e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg6e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); + return __riscv_vlsseg6e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e16.c index 3546a83d7f4b..27751e99b9fd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e16.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, 
vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_tu( @@ -50,7 +50,7 @@ void test_vlsseg7e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_tu( @@ -73,7 +73,7 @@ void test_vlsseg7e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_tu( @@ -96,7 +96,7 @@ void test_vlsseg7e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_tu( @@ -119,7 +119,7 @@ void test_vlsseg7e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_tu( @@ -142,7 +142,7 @@ void test_vlsseg7e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_tu( @@ -165,7 +165,7 @@ void test_vlsseg7e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_tu( @@ -188,7 +188,7 @@ void test_vlsseg7e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_tu( @@ -211,7 +211,7 @@ void test_vlsseg7e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_tum( @@ -234,7 +234,7 @@ void test_vlsseg7e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, 
vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_tum( @@ -257,7 +257,7 @@ void test_vlsseg7e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_tum( @@ -280,7 +280,7 @@ void test_vlsseg7e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, 
vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_tum( @@ -303,7 +303,7 @@ void test_vlsseg7e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_tum( @@ -326,7 +326,7 @@ void test_vlsseg7e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, 
ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_tum( @@ -349,7 +349,7 @@ void test_vlsseg7e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_tum( @@ -372,7 +372,7 @@ void test_vlsseg7e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_tum( @@ -395,7 +395,7 @@ void test_vlsseg7e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_tum( @@ -418,7 +418,7 @@ void test_vlsseg7e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_tumu( @@ -441,7 +441,7 @@ void test_vlsseg7e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_tumu( @@ -464,7 +464,7 @@ void test_vlsseg7e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_tumu( @@ -487,7 +487,7 @@ void test_vlsseg7e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_tumu( @@ -510,7 +510,7 @@ void test_vlsseg7e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_tumu( @@ -533,7 +533,7 @@ void 
test_vlsseg7e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_tumu( @@ -556,7 +556,7 @@ void test_vlsseg7e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_tumu( @@ -579,7 +579,7 @@ void test_vlsseg7e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16mf4_tumu(vuint16mf4_t 
*v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_tumu( @@ -602,7 +602,7 @@ void test_vlsseg7e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_tumu( @@ -625,7 +625,7 @@ void test_vlsseg7e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, 
vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_mu( @@ -648,7 +648,7 @@ void test_vlsseg7e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_mu( @@ -671,7 +671,7 @@ void test_vlsseg7e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t 
maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_mu( @@ -694,7 +694,7 @@ void test_vlsseg7e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_mu( @@ -717,7 +717,7 @@ void test_vlsseg7e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) 
{ - return vlsseg7e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_mu( @@ -740,7 +740,7 @@ void test_vlsseg7e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_mu( @@ -763,7 +763,7 @@ void test_vlsseg7e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, 
vl); + return __riscv_vlsseg7e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_mu( @@ -786,7 +786,7 @@ void test_vlsseg7e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_mu( @@ -809,7 +809,7 @@ void test_vlsseg7e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_mu( @@ -832,6 +832,6 @@ void test_vlsseg7e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e32.c index 46774d0cb1d7..ca979fe60c72 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e32.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_tu( @@ -50,7 +50,7 @@ void test_vlsseg7e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_tu( @@ -73,7 +73,7 @@ void test_vlsseg7e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } 
// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_tu( @@ -96,7 +96,7 @@ void test_vlsseg7e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_tu( @@ -119,7 +119,7 @@ void test_vlsseg7e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_tu( @@ -142,7 +142,7 @@ void test_vlsseg7e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void 
test_vlsseg7e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2_tum( @@ -165,7 +165,7 @@ void test_vlsseg7e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_tum( @@ -188,7 +188,7 @@ void test_vlsseg7e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, 
vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_tum( @@ -211,7 +211,7 @@ void test_vlsseg7e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_tum( @@ -234,7 +234,7 @@ void test_vlsseg7e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t 
maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_tum( @@ -257,7 +257,7 @@ void test_vlsseg7e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_tum( @@ -280,7 +280,7 @@ void test_vlsseg7e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32m1_tum(v0, v1, v2, 
v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2_tumu( @@ -303,7 +303,7 @@ void test_vlsseg7e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_tumu( @@ -326,7 +326,7 @@ void test_vlsseg7e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_tumu( @@ -349,7 +349,7 @@ void test_vlsseg7e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_tumu( @@ -372,7 +372,7 @@ void test_vlsseg7e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_tumu( @@ -395,7 +395,7 @@ void test_vlsseg7e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_tumu( @@ -418,7 +418,7 @@ void test_vlsseg7e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2_mu( @@ -441,7 +441,7 
@@ void test_vlsseg7e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_mu( @@ -464,7 +464,7 @@ void test_vlsseg7e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_mu( @@ -487,7 +487,7 @@ void test_vlsseg7e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void 
test_vlsseg7e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_mu( @@ -510,7 +510,7 @@ void test_vlsseg7e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_mu( @@ -533,7 +533,7 @@ void test_vlsseg7e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, 
vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_mu( @@ -556,6 +556,6 @@ void test_vlsseg7e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e64.c index cc0e88da28d0..c07b0d01d28c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e64.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void 
test_vlsseg7e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_tu( @@ -50,7 +50,7 @@ void test_vlsseg7e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_tu( @@ -73,7 +73,7 @@ void test_vlsseg7e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, 
vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1_tum( @@ -96,7 +96,7 @@ void test_vlsseg7e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_tum( @@ -119,7 +119,7 @@ void test_vlsseg7e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg7e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_tum( @@ -142,7 +142,7 @@ void test_vlsseg7e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1_tumu( @@ -165,7 +165,7 @@ void test_vlsseg7e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_tumu( @@ -188,7 +188,7 @@ void test_vlsseg7e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_tumu( @@ -211,7 +211,7 @@ void test_vlsseg7e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1_mu( @@ -234,7 +234,7 @@ void test_vlsseg7e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_mu( @@ -257,7 +257,7 @@ void test_vlsseg7e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_mu( @@ -280,6 +280,6 @@ void 
test_vlsseg7e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e8.c index 8169a3a38ae4..50ce948862b0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e8.c @@ -26,7 +26,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, 
vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_tu( @@ -49,7 +49,7 @@ void test_vlsseg7e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_tu( @@ -72,7 +72,7 @@ void test_vlsseg7e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_tu( @@ -95,7 +95,7 @@ void test_vlsseg7e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t 
*v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_tu( @@ -118,7 +118,7 @@ void test_vlsseg7e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_tu( @@ -141,7 +141,7 @@ void test_vlsseg7e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const 
uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_tu( @@ -164,7 +164,7 @@ void test_vlsseg7e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_tu( @@ -187,7 +187,7 @@ void test_vlsseg7e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return 
__riscv_vlsseg7e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8_tum( @@ -210,7 +210,7 @@ void test_vlsseg7e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_tum( @@ -233,7 +233,7 @@ void test_vlsseg7e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: 
@test_vlsseg7e8_v_i8mf2_tum( @@ -256,7 +256,7 @@ void test_vlsseg7e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_tum( @@ -279,7 +279,7 @@ void test_vlsseg7e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_tum( @@ -302,7 +302,7 @@ void test_vlsseg7e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf8_tum(vuint8mf8_t *v0, 
vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_tum( @@ -325,7 +325,7 @@ void test_vlsseg7e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_tum( @@ -348,7 +348,7 @@ void test_vlsseg7e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t 
maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_tum( @@ -371,7 +371,7 @@ void test_vlsseg7e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8_tumu( @@ -394,7 +394,7 @@ void test_vlsseg7e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg7e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_tumu( @@ -417,7 +417,7 @@ void test_vlsseg7e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_tumu( @@ -440,7 +440,7 @@ void test_vlsseg7e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return 
__riscv_vlsseg7e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_tumu( @@ -463,7 +463,7 @@ void test_vlsseg7e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_tumu( @@ -486,7 +486,7 @@ void test_vlsseg7e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // 
CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_tumu( @@ -509,7 +509,7 @@ void test_vlsseg7e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_tumu( @@ -532,7 +532,7 @@ void test_vlsseg7e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_tumu( @@ -555,7 +555,7 @@ void test_vlsseg7e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // 
CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8_mu( @@ -578,7 +578,7 @@ void test_vlsseg7e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_mu( @@ -601,7 +601,7 @@ void test_vlsseg7e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, 
vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_mu( @@ -624,7 +624,7 @@ void test_vlsseg7e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_mu( @@ -647,7 +647,7 @@ void test_vlsseg7e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - 
return vlsseg7e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_mu( @@ -670,7 +670,7 @@ void test_vlsseg7e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_mu( @@ -693,7 +693,7 @@ void test_vlsseg7e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + 
return __riscv_vlsseg7e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_mu( @@ -716,7 +716,7 @@ void test_vlsseg7e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_mu( @@ -739,6 +739,6 @@ void test_vlsseg7e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg7e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); + return __riscv_vlsseg7e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e16.c index e9cd79ca88e9..3c4113f61adb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e16.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_tu( @@ -54,7 +54,7 @@ void test_vlsseg8e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_tu( @@ -79,7 +79,7 @@ void test_vlsseg8e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_tu( @@ -104,7 +104,7 @@ void test_vlsseg8e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_tu( @@ -129,7 +129,7 @@ void test_vlsseg8e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_tu( @@ -154,7 +154,7 @@ void test_vlsseg8e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return 
__riscv_vlsseg8e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_tu( @@ -179,7 +179,7 @@ void test_vlsseg8e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_tu( @@ -204,7 +204,7 @@ void test_vlsseg8e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return 
__riscv_vlsseg8e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_tu( @@ -229,7 +229,7 @@ void test_vlsseg8e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_tum( @@ -254,7 +254,7 @@ void test_vlsseg8e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return 
__riscv_vlsseg8e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_tum( @@ -279,7 +279,7 @@ void test_vlsseg8e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_tum( @@ -304,7 +304,7 @@ void test_vlsseg8e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_tum( @@ -329,7 +329,7 @@ void test_vlsseg8e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_tum( @@ -354,7 +354,7 @@ void test_vlsseg8e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_tum( @@ -379,7 +379,7 @@ void test_vlsseg8e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_tum( @@ -404,7 +404,7 @@ void test_vlsseg8e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_tum( @@ -429,7 +429,7 @@ void test_vlsseg8e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_tum( @@ -454,7 +454,7 @@ void test_vlsseg8e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_tumu( @@ -479,7 +479,7 @@ void test_vlsseg8e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_tumu( @@ -504,7 +504,7 @@ void test_vlsseg8e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf2_tumu(v0, v1, v2, 
v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_tumu( @@ -529,7 +529,7 @@ void test_vlsseg8e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_tumu( @@ -554,7 +554,7 @@ void test_vlsseg8e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg8e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_tumu( @@ -579,7 +579,7 @@ void test_vlsseg8e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_tumu( @@ -604,7 +604,7 @@ void test_vlsseg8e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg8e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_tumu( @@ -629,7 +629,7 @@ void test_vlsseg8e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_tumu( @@ -654,7 +654,7 @@ void test_vlsseg8e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, 
ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_tumu( @@ -679,7 +679,7 @@ void test_vlsseg8e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_mu( @@ -704,7 +704,7 @@ void test_vlsseg8e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t 
maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_mu( @@ -729,7 +729,7 @@ void test_vlsseg8e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_mu( @@ -754,7 +754,7 @@ void test_vlsseg8e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t 
maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_mu( @@ -779,7 +779,7 @@ void test_vlsseg8e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_mu( @@ -804,7 +804,7 @@ void test_vlsseg8e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t 
maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_mu( @@ -829,7 +829,7 @@ void test_vlsseg8e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_mu( @@ -854,7 +854,7 @@ void test_vlsseg8e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, 
vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_mu( @@ -879,7 +879,7 @@ void test_vlsseg8e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_mu( @@ -904,6 +904,6 @@ void test_vlsseg8e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t 
maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e32.c index 68f4666701b8..4c51e6745239 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e32.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_tu( @@ -54,7 +54,7 @@ void test_vlsseg8e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void 
test_vlsseg8e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_tu( @@ -79,7 +79,7 @@ void test_vlsseg8e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_tu( @@ -104,7 +104,7 @@ void test_vlsseg8e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t 
*v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_tu( @@ -129,7 +129,7 @@ void test_vlsseg8e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_tu( @@ -154,7 +154,7 @@ void test_vlsseg8e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, 
vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_tum( @@ -179,7 +179,7 @@ void test_vlsseg8e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_tum( @@ -204,7 +204,7 @@ void test_vlsseg8e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t 
*v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_tum( @@ -229,7 +229,7 @@ void test_vlsseg8e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_tum( @@ -254,7 +254,7 @@ void test_vlsseg8e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t 
*v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_tum( @@ -279,7 +279,7 @@ void test_vlsseg8e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_tum( @@ -304,7 +304,7 @@ void test_vlsseg8e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, 
vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_tumu( @@ -329,7 +329,7 @@ void test_vlsseg8e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_tumu( @@ -354,7 +354,7 @@ void test_vlsseg8e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t 
*v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_tumu( @@ -379,7 +379,7 @@ void test_vlsseg8e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_tumu( @@ -404,7 +404,7 @@ void test_vlsseg8e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_i32m1_tumu(vint32m1_t 
*v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_tumu( @@ -429,7 +429,7 @@ void test_vlsseg8e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_tumu( @@ -454,7 +454,7 @@ void test_vlsseg8e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void 
test_vlsseg8e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_mu( @@ -479,7 +479,7 @@ void test_vlsseg8e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_mu( @@ -504,7 +504,7 @@ void test_vlsseg8e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // 
CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_mu( @@ -529,7 +529,7 @@ void test_vlsseg8e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_mu( @@ -554,7 +554,7 @@ void test_vlsseg8e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * 
// CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_mu( @@ -579,7 +579,7 @@ void test_vlsseg8e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_mu( @@ -604,6 +604,6 @@ void test_vlsseg8e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_ // 
CHECK-RV64-NEXT: ret void // void test_vlsseg8e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e64.c index f7209d0f8279..bb1e20604fe3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e64.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_tu( @@ -54,7 +54,7 @@ void test_vlsseg8e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_tu( @@ -79,7 +79,7 @@ void test_vlsseg8e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1_tum( @@ -104,7 +104,7 @@ void test_vlsseg8e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_tum( @@ -129,7 +129,7 @@ void test_vlsseg8e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_tum( @@ -154,7 +154,7 @@ void test_vlsseg8e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1_tumu( @@ -179,7 +179,7 @@ void test_vlsseg8e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_tumu( @@ -204,7 +204,7 @@ void test_vlsseg8e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_tumu( @@ -229,7 +229,7 @@ void test_vlsseg8e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1_mu( @@ -254,7 +254,7 @@ void test_vlsseg8e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_mu( @@ -279,7 +279,7 @@ void test_vlsseg8e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, 
bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_mu( @@ -304,6 +304,6 @@ void test_vlsseg8e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e8.c index 86791574112d..e96852247165 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e8.c @@ -28,7 +28,7 @@ // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_tu( @@ -53,7 +53,7 @@ void test_vlsseg8e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_tu( @@ -78,7 +78,7 @@ void test_vlsseg8e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, 
v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_tu( @@ -103,7 +103,7 @@ void test_vlsseg8e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_tu( @@ -128,7 +128,7 @@ void test_vlsseg8e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, 
bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_tu( @@ -153,7 +153,7 @@ void test_vlsseg8e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_tu( @@ -178,7 +178,7 @@ void test_vlsseg8e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_tu( @@ -203,7 
+203,7 @@ void test_vlsseg8e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8_tum( @@ -228,7 +228,7 @@ void test_vlsseg8e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_tum( @@ -253,7 +253,7 @@ void test_vlsseg8e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, 
vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_tum( @@ -278,7 +278,7 @@ void test_vlsseg8e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_tum( @@ -303,7 +303,7 @@ void test_vlsseg8e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vlsseg8e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_tum( @@ -328,7 +328,7 @@ void test_vlsseg8e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_tum( @@ -353,7 +353,7 @@ void test_vlsseg8e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf4_tum(vuint8mf4_t *v0, 
vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_tum( @@ -378,7 +378,7 @@ void test_vlsseg8e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_tum( @@ -403,7 +403,7 @@ void test_vlsseg8e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, 
vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8_tumu( @@ -428,7 +428,7 @@ void test_vlsseg8e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_tumu( @@ -453,7 +453,7 @@ void test_vlsseg8e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t 
*v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_tumu( @@ -478,7 +478,7 @@ void test_vlsseg8e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_tumu( @@ -503,7 +503,7 @@ void test_vlsseg8e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t 
*v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_tumu( @@ -528,7 +528,7 @@ void test_vlsseg8e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_tumu( @@ -553,7 +553,7 @@ void test_vlsseg8e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, 
vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_tumu( @@ -578,7 +578,7 @@ void test_vlsseg8e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_tumu( @@ -603,7 +603,7 @@ void test_vlsseg8e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t 
maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8_mu( @@ -628,7 +628,7 @@ void test_vlsseg8e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_mu( @@ -653,7 +653,7 @@ void test_vlsseg8e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t 
maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_mu( @@ -678,7 +678,7 @@ void test_vlsseg8e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_mu( @@ -703,7 +703,7 @@ void test_vlsseg8e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t 
maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_mu( @@ -728,7 +728,7 @@ void test_vlsseg8e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_mu( @@ -753,7 +753,7 @@ void test_vlsseg8e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t 
maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_mu( @@ -778,7 +778,7 @@ void test_vlsseg8e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_mu( @@ -803,6 +803,6 @@ void test_vlsseg8e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vlsseg8e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const 
uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); + return __riscv_vlsseg8e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei16.c index 1f66e9c4306e..c11cebdb7268 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m1_tu(maskedoff, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei16_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei16_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vluxei16_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vluxei16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_f16m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vluxei16_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei16_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei16_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint16mf2_t 
bindex, size_t vl) { - return vluxei16_v_f32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vluxei16_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei16_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vluxei16_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei16_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vluxei16_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei16_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vluxei16_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei16_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vluxei16_v_f64m1_tu(vfloat64m1_t maskedoff, const double 
*base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei16_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vluxei16_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei16_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vluxei16_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei16_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_tu( @@ -148,7 +148,7 @@ vfloat64m8_t test_vluxei16_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei16_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_tu( @@ -157,7 +157,7 @@ vint8mf8_t test_vluxei16_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei16_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8mf4_tu(maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_tu( @@ -166,7 +166,7 @@ vint8mf4_t test_vluxei16_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei16_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_tu( @@ -175,7 +175,7 @@ vint8mf2_t test_vluxei16_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei16_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_tu( @@ -184,7 +184,7 @@ vint8m1_t test_vluxei16_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei16_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i8m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_tu( @@ -193,7 +193,7 @@ vint8m2_t test_vluxei16_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vluxei16_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i8m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_tu( @@ -202,7 +202,7 @@ vint8m4_t test_vluxei16_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vluxei16_v_i16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_tu( @@ -211,7 +211,7 @@ vint16mf4_t test_vluxei16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_tu( @@ -220,7 +220,7 @@ vint16mf2_t test_vluxei16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_tu( @@ -229,7 +229,7 @@ vint16m1_t test_vluxei16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_tu( @@ -238,7 +238,7 @@ vint16m2_t test_vluxei16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_tu( @@ -247,7 +247,7 @@ vint16m4_t test_vluxei16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint16m8_t test_vluxei16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i16m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_tu( @@ -256,7 +256,7 @@ vint16m8_t test_vluxei16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei16_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_tu( @@ -265,7 +265,7 @@ vint32mf2_t test_vluxei16_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei16_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_tu( @@ -274,7 +274,7 @@ vint32m1_t test_vluxei16_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei16_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_tu( @@ -283,7 +283,7 @@ vint32m2_t test_vluxei16_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei16_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei16_v_i32m8_tu( @@ -292,7 +292,7 @@ vint32m4_t test_vluxei16_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei16_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_tu( @@ -301,7 +301,7 @@ vint32m8_t test_vluxei16_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei16_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_tu( @@ -310,7 +310,7 @@ vint64m1_t test_vluxei16_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei16_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_tu( @@ -319,7 +319,7 @@ vint64m2_t test_vluxei16_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei16_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_tu( @@ -328,7 +328,7 @@ vint64m4_t test_vluxei16_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei16_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return 
vluxei16_v_i64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_tu( @@ -337,7 +337,7 @@ vint64m8_t test_vluxei16_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei16_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_tu( @@ -346,7 +346,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei16_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_tu( @@ -355,7 +355,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei16_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_tu( @@ -364,7 +364,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei16_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_tu( @@ -373,7 +373,7 @@ vuint8m1_t test_vluxei16_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vu // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m2_t test_vluxei16_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u8m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_tu( @@ -382,7 +382,7 @@ vuint8m2_t test_vluxei16_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vluxei16_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u8m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_tu( @@ -391,7 +391,7 @@ vuint8m4_t test_vluxei16_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_tu( @@ -400,7 +400,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_tu( @@ -409,7 +409,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei16_v_u16m2_tu( @@ -418,7 +418,7 @@ vuint16m1_t test_vluxei16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_tu( @@ -427,7 +427,7 @@ vuint16m2_t test_vluxei16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_tu( @@ -436,7 +436,7 @@ vuint16m4_t test_vluxei16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vluxei16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u16m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_tu( @@ -445,7 +445,7 @@ vuint16m8_t test_vluxei16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei16_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_tu( @@ -454,7 +454,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei16_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { 
- return vluxei16_v_u32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_tu( @@ -463,7 +463,7 @@ vuint32m1_t test_vluxei16_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei16_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_tu( @@ -472,7 +472,7 @@ vuint32m2_t test_vluxei16_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei16_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_tu( @@ -481,7 +481,7 @@ vuint32m4_t test_vluxei16_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei16_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_tu( @@ -490,7 +490,7 @@ vuint32m8_t test_vluxei16_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei16_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_tu( @@ -499,7 +499,7 @@ vuint64m1_t test_vluxei16_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei16_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_tu( @@ -508,7 +508,7 @@ vuint64m2_t test_vluxei16_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei16_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_tu( @@ -517,7 +517,7 @@ vuint64m4_t test_vluxei16_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei16_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_tum( @@ -526,7 +526,7 @@ vuint64m8_t test_vluxei16_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_tum( @@ -535,7 +535,7 @@ vfloat16mf4_t test_vluxei16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); + 
return __riscv_vluxei16_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_tum( @@ -544,7 +544,7 @@ vfloat16mf2_t test_vluxei16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_tum( @@ -553,7 +553,7 @@ vfloat16m1_t test_vluxei16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_tum( @@ -562,7 +562,7 @@ vfloat16m2_t test_vluxei16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_tum( @@ -571,7 +571,7 @@ vfloat16m4_t test_vluxei16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vluxei16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_f16m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_tum( @@ -580,7 +580,7 @@ 
vfloat16m8_t test_vluxei16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei16_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_tum( @@ -589,7 +589,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei16_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_tum( @@ -598,7 +598,7 @@ vfloat32m1_t test_vluxei16_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei16_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_tum( @@ -607,7 +607,7 @@ vfloat32m2_t test_vluxei16_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei16_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_tum( @@ -616,7 +616,7 @@ vfloat32m4_t test_vluxei16_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t 
test_vluxei16_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_tum( @@ -625,7 +625,7 @@ vfloat32m8_t test_vluxei16_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei16_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_tum( @@ -634,7 +634,7 @@ vfloat64m1_t test_vluxei16_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei16_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_tum( @@ -643,7 +643,7 @@ vfloat64m2_t test_vluxei16_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei16_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_tum( @@ -652,7 +652,7 @@ vfloat64m4_t test_vluxei16_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei16_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f64m8_tum(mask, 
maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_tum( @@ -661,7 +661,7 @@ vfloat64m8_t test_vluxei16_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei16_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_tum( @@ -670,7 +670,7 @@ vint8mf8_t test_vluxei16_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei16_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_tum( @@ -679,7 +679,7 @@ vint8mf4_t test_vluxei16_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei16_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_tum( @@ -688,7 +688,7 @@ vint8mf2_t test_vluxei16_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei16_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_tum( @@ -697,7 +697,7 @@ 
vint8m1_t test_vluxei16_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei16_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i8m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_tum( @@ -706,7 +706,7 @@ vint8m2_t test_vluxei16_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vluxei16_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i8m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_tum( @@ -715,7 +715,7 @@ vint8m4_t test_vluxei16_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_tum( @@ -724,7 +724,7 @@ vint16mf4_t test_vluxei16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_tum( @@ -733,7 +733,7 @@ vint16mf2_t test_vluxei16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei16_v_i16m1_tum(vbool16_t mask, 
vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_tum( @@ -742,7 +742,7 @@ vint16m1_t test_vluxei16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_tum( @@ -751,7 +751,7 @@ vint16m2_t test_vluxei16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_tum( @@ -760,7 +760,7 @@ vint16m4_t test_vluxei16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vluxei16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i16m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_tum( @@ -769,7 +769,7 @@ vint16m8_t test_vluxei16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei16_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); + return 
__riscv_vluxei16_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_tum( @@ -778,7 +778,7 @@ vint32mf2_t test_vluxei16_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei16_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_tum( @@ -787,7 +787,7 @@ vint32m1_t test_vluxei16_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei16_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_tum( @@ -796,7 +796,7 @@ vint32m2_t test_vluxei16_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei16_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_tum( @@ -805,7 +805,7 @@ vint32m4_t test_vluxei16_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei16_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_tum( @@ -814,7 +814,7 @@ vint32m8_t 
test_vluxei16_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei16_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_tum( @@ -823,7 +823,7 @@ vint64m1_t test_vluxei16_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei16_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_tum( @@ -832,7 +832,7 @@ vint64m2_t test_vluxei16_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei16_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_tum( @@ -841,7 +841,7 @@ vint64m4_t test_vluxei16_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei16_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_tum( @@ -850,7 +850,7 @@ vint64m8_t test_vluxei16_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei16_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t 
maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_tum( @@ -859,7 +859,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei16_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_tum( @@ -868,7 +868,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei16_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_tum( @@ -877,7 +877,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei16_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_tum( @@ -886,7 +886,7 @@ vuint8m1_t test_vluxei16_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei16_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u8m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8m2_tum(mask, 
maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_tum( @@ -895,7 +895,7 @@ vuint8m2_t test_vluxei16_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vluxei16_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u8m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_tum( @@ -904,7 +904,7 @@ vuint8m4_t test_vluxei16_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_tum( @@ -913,7 +913,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_tum( @@ -922,7 +922,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_tum( @@ -931,7 +931,7 @@ vuint16m1_t test_vluxei16_v_u16m1_tum(vbool16_t mask, 
vuint16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_tum( @@ -940,7 +940,7 @@ vuint16m2_t test_vluxei16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_tum( @@ -949,7 +949,7 @@ vuint16m4_t test_vluxei16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vluxei16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u16m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_tum( @@ -958,7 +958,7 @@ vuint16m8_t test_vluxei16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei16_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_tum( @@ -967,7 +967,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei16_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t 
*base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_tum( @@ -976,7 +976,7 @@ vuint32m1_t test_vluxei16_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei16_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_tum( @@ -985,7 +985,7 @@ vuint32m2_t test_vluxei16_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei16_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_tum( @@ -994,7 +994,7 @@ vuint32m4_t test_vluxei16_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei16_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_tum( @@ -1003,7 +1003,7 @@ vuint32m8_t test_vluxei16_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei16_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m1_tum(mask, 
maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_tum( @@ -1012,7 +1012,7 @@ vuint64m1_t test_vluxei16_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei16_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_tum( @@ -1021,7 +1021,7 @@ vuint64m2_t test_vluxei16_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei16_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_tum( @@ -1030,7 +1030,7 @@ vuint64m4_t test_vluxei16_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei16_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_tumu( @@ -1039,7 +1039,7 @@ vuint64m8_t test_vluxei16_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_tumu( @@ -1048,7 +1048,7 @@ vfloat16mf4_t 
test_vluxei16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_tumu( @@ -1057,7 +1057,7 @@ vfloat16mf2_t test_vluxei16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_tumu( @@ -1066,7 +1066,7 @@ vfloat16m1_t test_vluxei16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_tumu( @@ -1075,7 +1075,7 @@ vfloat16m2_t test_vluxei16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_tumu( @@ -1084,7 +1084,7 @@ vfloat16m4_t test_vluxei16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t 
test_vluxei16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_f16m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_tumu( @@ -1093,7 +1093,7 @@ vfloat16m8_t test_vluxei16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei16_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_tumu( @@ -1102,7 +1102,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei16_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_tumu( @@ -1111,7 +1111,7 @@ vfloat32m1_t test_vluxei16_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei16_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_tumu( @@ -1120,7 +1120,7 @@ vfloat32m2_t test_vluxei16_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei16_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return 
vluxei16_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_tumu( @@ -1129,7 +1129,7 @@ vfloat32m4_t test_vluxei16_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei16_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_tumu( @@ -1138,7 +1138,7 @@ vfloat32m8_t test_vluxei16_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei16_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_tumu( @@ -1147,7 +1147,7 @@ vfloat64m1_t test_vluxei16_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei16_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_tumu( @@ -1156,7 +1156,7 @@ vfloat64m2_t test_vluxei16_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei16_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_tumu( @@ -1165,7 +1165,7 @@ vfloat64m4_t test_vluxei16_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei16_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_tumu( @@ -1174,7 +1174,7 @@ vfloat64m8_t test_vluxei16_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei16_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_tumu( @@ -1183,7 +1183,7 @@ vint8mf8_t test_vluxei16_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei16_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_tumu( @@ -1192,7 +1192,7 @@ vint8mf4_t test_vluxei16_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei16_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_tumu( @@ -1201,7 +1201,7 @@ vint8mf2_t test_vluxei16_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, 
cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei16_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_tumu( @@ -1210,7 +1210,7 @@ vint8m1_t test_vluxei16_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei16_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_tumu( @@ -1219,7 +1219,7 @@ vint8m2_t test_vluxei16_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vluxei16_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i8m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_tumu( @@ -1228,7 +1228,7 @@ vint8m4_t test_vluxei16_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_tumu( @@ -1237,7 +1237,7 @@ vint16mf4_t test_vluxei16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, 
size_t vl) { - return vluxei16_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_tumu( @@ -1246,7 +1246,7 @@ vint16mf2_t test_vluxei16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_tumu( @@ -1255,7 +1255,7 @@ vint16m1_t test_vluxei16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_tumu( @@ -1264,7 +1264,7 @@ vint16m2_t test_vluxei16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_tumu( @@ -1273,7 +1273,7 @@ vint16m4_t test_vluxei16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vluxei16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i16m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m8_tumu(mask, maskedoff, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_tumu( @@ -1282,7 +1282,7 @@ vint16m8_t test_vluxei16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei16_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_tumu( @@ -1291,7 +1291,7 @@ vint32mf2_t test_vluxei16_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei16_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_tumu( @@ -1300,7 +1300,7 @@ vint32m1_t test_vluxei16_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei16_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_tumu( @@ -1309,7 +1309,7 @@ vint32m2_t test_vluxei16_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei16_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_tumu( @@ -1318,7 +1318,7 @@ vint32m4_t test_vluxei16_v_i32m4_tumu(vbool8_t mask, 
vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei16_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_tumu( @@ -1327,7 +1327,7 @@ vint32m8_t test_vluxei16_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei16_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_tumu( @@ -1336,7 +1336,7 @@ vint64m1_t test_vluxei16_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei16_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_tumu( @@ -1345,7 +1345,7 @@ vint64m2_t test_vluxei16_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei16_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_tumu( @@ -1354,7 +1354,7 @@ vint64m4_t test_vluxei16_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei16_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const 
int64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_tumu( @@ -1363,7 +1363,7 @@ vint64m8_t test_vluxei16_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei16_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_tumu( @@ -1372,7 +1372,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei16_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_tumu( @@ -1381,7 +1381,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei16_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_tumu( @@ -1390,7 +1390,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei16_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vluxei16_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_tumu( @@ -1399,7 +1399,7 @@ vuint8m1_t test_vluxei16_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei16_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_tumu( @@ -1408,7 +1408,7 @@ vuint8m2_t test_vluxei16_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vluxei16_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u8m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_tumu( @@ -1417,7 +1417,7 @@ vuint8m4_t test_vluxei16_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_tumu( @@ -1426,7 +1426,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_tumu( @@ -1435,7 +1435,7 @@ 
vuint16mf2_t test_vluxei16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_tumu( @@ -1444,7 +1444,7 @@ vuint16m1_t test_vluxei16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_tumu( @@ -1453,7 +1453,7 @@ vuint16m2_t test_vluxei16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_tumu( @@ -1462,7 +1462,7 @@ vuint16m4_t test_vluxei16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vluxei16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u16m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_tumu( @@ -1471,7 +1471,7 @@ vuint16m8_t test_vluxei16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vluxei16_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_tumu( @@ -1480,7 +1480,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei16_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_tumu( @@ -1489,7 +1489,7 @@ vuint32m1_t test_vluxei16_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei16_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_tumu( @@ -1498,7 +1498,7 @@ vuint32m2_t test_vluxei16_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei16_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_tumu( @@ -1507,7 +1507,7 @@ vuint32m4_t test_vluxei16_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei16_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return 
vluxei16_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_tumu( @@ -1516,7 +1516,7 @@ vuint32m8_t test_vluxei16_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei16_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_tumu( @@ -1525,7 +1525,7 @@ vuint64m1_t test_vluxei16_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei16_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_tumu( @@ -1534,7 +1534,7 @@ vuint64m2_t test_vluxei16_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei16_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_tumu( @@ -1543,7 +1543,7 @@ vuint64m4_t test_vluxei16_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei16_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_mu( @@ -1552,7 +1552,7 @@ vuint64m8_t test_vluxei16_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_mu( @@ -1561,7 +1561,7 @@ vfloat16mf4_t test_vluxei16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_mu( @@ -1570,7 +1570,7 @@ vfloat16mf2_t test_vluxei16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_mu( @@ -1579,7 +1579,7 @@ vfloat16m1_t test_vluxei16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_mu( @@ -1588,7 +1588,7 @@ vfloat16m2_t test_vluxei16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, 
con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_mu( @@ -1597,7 +1597,7 @@ vfloat16m4_t test_vluxei16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vluxei16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_f16m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f16m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_mu( @@ -1606,7 +1606,7 @@ vfloat16m8_t test_vluxei16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei16_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_mu( @@ -1615,7 +1615,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei16_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_mu( @@ -1624,7 +1624,7 @@ vfloat32m1_t test_vluxei16_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei16_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, 
size_t vl) { - return vluxei16_v_f32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_mu( @@ -1633,7 +1633,7 @@ vfloat32m2_t test_vluxei16_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei16_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_mu( @@ -1642,7 +1642,7 @@ vfloat32m4_t test_vluxei16_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei16_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_mu( @@ -1651,7 +1651,7 @@ vfloat32m8_t test_vluxei16_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei16_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_mu( @@ -1660,7 +1660,7 @@ vfloat64m1_t test_vluxei16_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei16_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m2_mu(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_mu( @@ -1669,7 +1669,7 @@ vfloat64m2_t test_vluxei16_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei16_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_mu( @@ -1678,7 +1678,7 @@ vfloat64m4_t test_vluxei16_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei16_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_f64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_mu( @@ -1687,7 +1687,7 @@ vfloat64m8_t test_vluxei16_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei16_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_mu( @@ -1696,7 +1696,7 @@ vint8mf8_t test_vluxei16_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei16_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_mu( @@ -1705,7 +1705,7 @@ vint8mf4_t test_vluxei16_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8mf2_t test_vluxei16_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_mu( @@ -1714,7 +1714,7 @@ vint8mf2_t test_vluxei16_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei16_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_mu( @@ -1723,7 +1723,7 @@ vint8m1_t test_vluxei16_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei16_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i8m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_mu( @@ -1732,7 +1732,7 @@ vint8m2_t test_vluxei16_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vluxei16_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i8m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i8m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_mu( @@ -1741,7 +1741,7 @@ vint8m4_t test_vluxei16_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i16mf4_mu(mask, maskedoff, base, bindex, 
vl); + return __riscv_vluxei16_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_mu( @@ -1750,7 +1750,7 @@ vint16mf4_t test_vluxei16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_mu( @@ -1759,7 +1759,7 @@ vint16mf2_t test_vluxei16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_mu( @@ -1768,7 +1768,7 @@ vint16m1_t test_vluxei16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_mu( @@ -1777,7 +1777,7 @@ vint16m2_t test_vluxei16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_mu( @@ -1786,7 +1786,7 @@ vint16m4_t 
test_vluxei16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vluxei16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_i16m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i16m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_mu( @@ -1795,7 +1795,7 @@ vint16m8_t test_vluxei16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei16_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_mu( @@ -1804,7 +1804,7 @@ vint32mf2_t test_vluxei16_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei16_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_mu( @@ -1813,7 +1813,7 @@ vint32m1_t test_vluxei16_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei16_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_mu( @@ -1822,7 +1822,7 @@ vint32m2_t test_vluxei16_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei16_v_i32m4_mu(vbool8_t mask, vint32m4_t 
maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_mu( @@ -1831,7 +1831,7 @@ vint32m4_t test_vluxei16_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei16_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_i32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_mu( @@ -1840,7 +1840,7 @@ vint32m8_t test_vluxei16_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei16_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_i64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_mu( @@ -1849,7 +1849,7 @@ vint64m1_t test_vluxei16_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei16_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_i64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_mu( @@ -1858,7 +1858,7 @@ vint64m2_t test_vluxei16_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei16_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_i64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m4_mu(mask, 
maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_mu( @@ -1867,7 +1867,7 @@ vint64m4_t test_vluxei16_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei16_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_i64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_i64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_mu( @@ -1876,7 +1876,7 @@ vint64m8_t test_vluxei16_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei16_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_mu( @@ -1885,7 +1885,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei16_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_mu( @@ -1894,7 +1894,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei16_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_mu( @@ -1903,7 +1903,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t 
maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei16_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_mu( @@ -1912,7 +1912,7 @@ vuint8m1_t test_vluxei16_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei16_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u8m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_mu( @@ -1921,7 +1921,7 @@ vuint8m2_t test_vluxei16_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vluxei16_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u8m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u8m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_mu( @@ -1930,7 +1930,7 @@ vuint8m4_t test_vluxei16_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_mu( @@ -1939,7 +1939,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t 
vl) { - return vluxei16_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_mu( @@ -1948,7 +1948,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_mu( @@ -1957,7 +1957,7 @@ vuint16m1_t test_vluxei16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_mu( @@ -1966,7 +1966,7 @@ vuint16m2_t test_vluxei16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_mu( @@ -1975,7 +1975,7 @@ vuint16m4_t test_vluxei16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vluxei16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_u16m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u16m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei16_v_u32mf2_mu( @@ -1984,7 +1984,7 @@ vuint16m8_t test_vluxei16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei16_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_mu( @@ -1993,7 +1993,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei16_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_mu( @@ -2002,7 +2002,7 @@ vuint32m1_t test_vluxei16_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei16_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_mu( @@ -2011,7 +2011,7 @@ vuint32m2_t test_vluxei16_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei16_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_u32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_mu( @@ -2020,7 +2020,7 @@ vuint32m4_t test_vluxei16_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m8_t test_vluxei16_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_u32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_mu( @@ -2029,7 +2029,7 @@ vuint32m8_t test_vluxei16_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei16_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_u64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_mu( @@ -2038,7 +2038,7 @@ vuint64m1_t test_vluxei16_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei16_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_u64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_mu( @@ -2047,7 +2047,7 @@ vuint64m2_t test_vluxei16_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei16_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_u64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_mu( @@ -2056,6 +2056,6 @@ vuint64m4_t test_vluxei16_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei16_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return 
vluxei16_v_u64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei16_v_u64m8_mu(mask, maskedoff, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei32.c index 0b9d4a6fe290..4ad555ec58f4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei32.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei32_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei32_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei32_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei32_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei32_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei32_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei32_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, 
size_t vl) { - return vluxei32_v_f16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei32_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei32_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vluxei32_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_tu( @@ -67,7 +67,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_tu( @@ -76,7 +76,7 @@ vfloat32m1_t test_vluxei32_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_tu( @@ -85,7 +85,7 @@ vfloat32m2_t test_vluxei32_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_tu( @@ -94,7 +94,7 @@ vfloat32m4_t test_vluxei32_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_tu( @@ -103,7 +103,7 @@ vfloat32m8_t test_vluxei32_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei32_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_tu( @@ -112,7 +112,7 @@ vfloat64m1_t test_vluxei32_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei32_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_tu( @@ -121,7 +121,7 @@ vfloat64m2_t test_vluxei32_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei32_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m4_tu(maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_tu( @@ -130,7 +130,7 @@ vfloat64m4_t test_vluxei32_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei32_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_tu( @@ -139,7 +139,7 @@ vfloat64m8_t test_vluxei32_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei32_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_tu( @@ -148,7 +148,7 @@ vint8mf8_t test_vluxei32_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei32_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_tu( @@ -157,7 +157,7 @@ vint8mf4_t test_vluxei32_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei32_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_tu( @@ -166,7 +166,7 @@ vint8mf2_t test_vluxei32_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei32_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return 
vluxei32_v_i8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_tu( @@ -175,7 +175,7 @@ vint8m1_t test_vluxei32_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei32_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i8m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_tu( @@ -184,7 +184,7 @@ vint8m2_t test_vluxei32_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei32_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_tu( @@ -193,7 +193,7 @@ vint16mf4_t test_vluxei32_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei32_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_tu( @@ -202,7 +202,7 @@ vint16mf2_t test_vluxei32_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei32_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_tu( @@ -211,7 +211,7 @@ vint16m1_t test_vluxei32_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m2_t test_vluxei32_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_tu( @@ -220,7 +220,7 @@ vint16m2_t test_vluxei32_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei32_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_tu( @@ -229,7 +229,7 @@ vint16m4_t test_vluxei32_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_tu( @@ -238,7 +238,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_tu( @@ -247,7 +247,7 @@ vint32m1_t test_vluxei32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei32_v_i32m4_tu( @@ -256,7 +256,7 @@ vint32m2_t test_vluxei32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_tu( @@ -265,7 +265,7 @@ vint32m4_t test_vluxei32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_tu( @@ -274,7 +274,7 @@ vint32m8_t test_vluxei32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei32_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_tu( @@ -283,7 +283,7 @@ vint64m1_t test_vluxei32_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei32_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_tu( @@ -292,7 +292,7 @@ vint64m2_t test_vluxei32_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei32_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return 
vluxei32_v_i64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_tu( @@ -301,7 +301,7 @@ vint64m4_t test_vluxei32_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei32_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_tu( @@ -310,7 +310,7 @@ vint64m8_t test_vluxei32_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei32_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_tu( @@ -319,7 +319,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei32_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_tu( @@ -328,7 +328,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei32_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_tu( @@ -337,7 +337,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m1_t test_vluxei32_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_tu( @@ -346,7 +346,7 @@ vuint8m1_t test_vluxei32_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei32_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u8m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_tu( @@ -355,7 +355,7 @@ vuint8m2_t test_vluxei32_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei32_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_tu( @@ -364,7 +364,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei32_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_tu( @@ -373,7 +373,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei32_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei32_v_u16m2_tu( @@ -382,7 +382,7 @@ vuint16m1_t test_vluxei32_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei32_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_tu( @@ -391,7 +391,7 @@ vuint16m2_t test_vluxei32_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei32_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_tu( @@ -400,7 +400,7 @@ vuint16m4_t test_vluxei32_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_tu( @@ -409,7 +409,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_tu( @@ -418,7 +418,7 @@ vuint32m1_t test_vluxei32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - 
return vluxei32_v_u32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_tu( @@ -427,7 +427,7 @@ vuint32m2_t test_vluxei32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_tu( @@ -436,7 +436,7 @@ vuint32m4_t test_vluxei32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_tu( @@ -445,7 +445,7 @@ vuint32m8_t test_vluxei32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei32_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_tu( @@ -454,7 +454,7 @@ vuint64m1_t test_vluxei32_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei32_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_tu( @@ -463,7 +463,7 @@ vuint64m2_t test_vluxei32_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei32_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_tu( @@ -472,7 +472,7 @@ vuint64m4_t test_vluxei32_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei32_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_tum( @@ -481,7 +481,7 @@ vuint64m8_t test_vluxei32_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei32_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_tum( @@ -490,7 +490,7 @@ vfloat16mf4_t test_vluxei32_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei32_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_tum( @@ -499,7 +499,7 @@ vfloat16mf2_t test_vluxei32_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei32_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f16m1_tum(mask, 
maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_tum( @@ -508,7 +508,7 @@ vfloat16m1_t test_vluxei32_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei32_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_tum( @@ -517,7 +517,7 @@ vfloat16m2_t test_vluxei32_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei32_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_tum( @@ -526,7 +526,7 @@ vfloat16m4_t test_vluxei32_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_tum( @@ -535,7 +535,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei32_v_f32m2_tum( @@ -544,7 +544,7 @@ vfloat32m1_t test_vluxei32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_tum( @@ -553,7 +553,7 @@ vfloat32m2_t test_vluxei32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_tum( @@ -562,7 +562,7 @@ vfloat32m4_t test_vluxei32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_tum( @@ -571,7 +571,7 @@ vfloat32m8_t test_vluxei32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei32_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_tum( @@ -580,7 +580,7 @@ vfloat64m1_t test_vluxei32_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat64m2_t test_vluxei32_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_tum( @@ -589,7 +589,7 @@ vfloat64m2_t test_vluxei32_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei32_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_tum( @@ -598,7 +598,7 @@ vfloat64m4_t test_vluxei32_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei32_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_tum( @@ -607,7 +607,7 @@ vfloat64m8_t test_vluxei32_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei32_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_tum( @@ -616,7 +616,7 @@ vint8mf8_t test_vluxei32_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei32_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return 
vluxei32_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_tum( @@ -625,7 +625,7 @@ vint8mf4_t test_vluxei32_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei32_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_tum( @@ -634,7 +634,7 @@ vint8mf2_t test_vluxei32_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei32_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_tum( @@ -643,7 +643,7 @@ vint8m1_t test_vluxei32_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei32_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i8m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_tum( @@ -652,7 +652,7 @@ vint8m2_t test_vluxei32_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei32_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei32_v_i16mf2_tum( @@ -661,7 +661,7 @@ vint16mf4_t test_vluxei32_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei32_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_tum( @@ -670,7 +670,7 @@ vint16mf2_t test_vluxei32_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei32_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_tum( @@ -679,7 +679,7 @@ vint16m1_t test_vluxei32_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei32_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_tum( @@ -688,7 +688,7 @@ vint16m2_t test_vluxei32_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei32_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_tum( @@ -697,7 +697,7 @@ vint16m4_t test_vluxei32_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32mf2_t test_vluxei32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_tum( @@ -706,7 +706,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_tum( @@ -715,7 +715,7 @@ vint32m1_t test_vluxei32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_tum( @@ -724,7 +724,7 @@ vint32m2_t test_vluxei32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_tum( @@ -733,7 +733,7 @@ vint32m4_t test_vluxei32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i32m8_tum(mask, 
maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_tum( @@ -742,7 +742,7 @@ vint32m8_t test_vluxei32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei32_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_tum( @@ -751,7 +751,7 @@ vint64m1_t test_vluxei32_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei32_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_tum( @@ -760,7 +760,7 @@ vint64m2_t test_vluxei32_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei32_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_tum( @@ -769,7 +769,7 @@ vint64m4_t test_vluxei32_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei32_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_tum( @@ -778,7 +778,7 
@@ vint64m8_t test_vluxei32_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei32_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_tum( @@ -787,7 +787,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei32_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_tum( @@ -796,7 +796,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei32_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_tum( @@ -805,7 +805,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei32_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_tum( @@ -814,7 +814,7 @@ vuint8m1_t test_vluxei32_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei32_v_u8m2_tum(vbool4_t mask, 
vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u8m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_tum( @@ -823,7 +823,7 @@ vuint8m2_t test_vluxei32_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei32_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_tum( @@ -832,7 +832,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei32_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_tum( @@ -841,7 +841,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei32_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_tum( @@ -850,7 +850,7 @@ vuint16m1_t test_vluxei32_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei32_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u16m2_tum(mask, maskedoff, base, bindex, vl); + return 
__riscv_vluxei32_v_u16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_tum( @@ -859,7 +859,7 @@ vuint16m2_t test_vluxei32_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei32_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_tum( @@ -868,7 +868,7 @@ vuint16m4_t test_vluxei32_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_tum( @@ -877,7 +877,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_tum( @@ -886,7 +886,7 @@ vuint32m1_t test_vluxei32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_tum( @@ -895,7 +895,7 @@ vuint32m2_t 
test_vluxei32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_tum( @@ -904,7 +904,7 @@ vuint32m4_t test_vluxei32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_tum( @@ -913,7 +913,7 @@ vuint32m8_t test_vluxei32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei32_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_tum( @@ -922,7 +922,7 @@ vuint64m1_t test_vluxei32_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei32_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_tum( @@ -931,7 +931,7 @@ vuint64m2_t test_vluxei32_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei32_v_u64m4_tum(vbool16_t mask, 
vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_tum( @@ -940,7 +940,7 @@ vuint64m4_t test_vluxei32_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei32_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_tumu( @@ -949,7 +949,7 @@ vuint64m8_t test_vluxei32_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei32_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_tumu( @@ -958,7 +958,7 @@ vfloat16mf4_t test_vluxei32_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei32_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_tumu( @@ -967,7 +967,7 @@ vfloat16mf2_t test_vluxei32_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei32_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f16m1_tumu(mask, maskedoff, base, 
bindex, vl); + return __riscv_vluxei32_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_tumu( @@ -976,7 +976,7 @@ vfloat16m1_t test_vluxei32_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei32_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_tumu( @@ -985,7 +985,7 @@ vfloat16m2_t test_vluxei32_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei32_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_tumu( @@ -994,7 +994,7 @@ vfloat16m4_t test_vluxei32_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_tumu( @@ -1003,7 +1003,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei32_v_f32m2_tumu( @@ -1012,7 +1012,7 @@ vfloat32m1_t test_vluxei32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_tumu( @@ -1021,7 +1021,7 @@ vfloat32m2_t test_vluxei32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_tumu( @@ -1030,7 +1030,7 @@ vfloat32m4_t test_vluxei32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_tumu( @@ -1039,7 +1039,7 @@ vfloat32m8_t test_vluxei32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei32_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_tumu( @@ -1048,7 +1048,7 @@ vfloat64m1_t test_vluxei32_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei32_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_tumu( @@ -1057,7 +1057,7 @@ vfloat64m2_t test_vluxei32_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei32_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_tumu( @@ -1066,7 +1066,7 @@ vfloat64m4_t test_vluxei32_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei32_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_tumu( @@ -1075,7 +1075,7 @@ vfloat64m8_t test_vluxei32_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei32_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_tumu( @@ -1084,7 +1084,7 @@ vint8mf8_t test_vluxei32_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei32_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t 
bindex, size_t vl) { - return vluxei32_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_tumu( @@ -1093,7 +1093,7 @@ vint8mf4_t test_vluxei32_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei32_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_tumu( @@ -1102,7 +1102,7 @@ vint8mf2_t test_vluxei32_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei32_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_tumu( @@ -1111,7 +1111,7 @@ vint8m1_t test_vluxei32_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei32_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_tumu( @@ -1120,7 +1120,7 @@ vint8m2_t test_vluxei32_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei32_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16mf4_tumu(mask, maskedoff, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_tumu( @@ -1129,7 +1129,7 @@ vint16mf4_t test_vluxei32_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei32_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_tumu( @@ -1138,7 +1138,7 @@ vint16mf2_t test_vluxei32_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei32_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_tumu( @@ -1147,7 +1147,7 @@ vint16m1_t test_vluxei32_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei32_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_tumu( @@ -1156,7 +1156,7 @@ vint16m2_t test_vluxei32_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei32_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_tumu( @@ -1165,7 +1165,7 @@ vint16m4_t test_vluxei32_v_i16m4_tumu(vbool4_t mask, vint16m4_t 
maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_tumu( @@ -1174,7 +1174,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_tumu( @@ -1183,7 +1183,7 @@ vint32m1_t test_vluxei32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_tumu( @@ -1192,7 +1192,7 @@ vint32m2_t test_vluxei32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_tumu( @@ -1201,7 +1201,7 @@ vint32m4_t test_vluxei32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t 
*base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_tumu( @@ -1210,7 +1210,7 @@ vint32m8_t test_vluxei32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei32_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_tumu( @@ -1219,7 +1219,7 @@ vint64m1_t test_vluxei32_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei32_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_tumu( @@ -1228,7 +1228,7 @@ vint64m2_t test_vluxei32_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei32_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_tumu( @@ -1237,7 +1237,7 @@ vint64m4_t test_vluxei32_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei32_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vluxei32_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_tumu( @@ -1246,7 +1246,7 @@ vint64m8_t test_vluxei32_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei32_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_tumu( @@ -1255,7 +1255,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei32_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_tumu( @@ -1264,7 +1264,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei32_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_tumu( @@ -1273,7 +1273,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei32_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_tumu( @@ -1282,7 +1282,7 @@ 
vuint8m1_t test_vluxei32_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei32_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_tumu( @@ -1291,7 +1291,7 @@ vuint8m2_t test_vluxei32_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei32_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_tumu( @@ -1300,7 +1300,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei32_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_tumu( @@ -1309,7 +1309,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei32_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_tumu( @@ -1318,7 +1318,7 @@ vuint16m1_t test_vluxei32_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vluxei32_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_tumu( @@ -1327,7 +1327,7 @@ vuint16m2_t test_vluxei32_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei32_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_tumu( @@ -1336,7 +1336,7 @@ vuint16m4_t test_vluxei32_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_tumu( @@ -1345,7 +1345,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_tumu( @@ -1354,7 +1354,7 @@ vuint32m1_t test_vluxei32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return 
vluxei32_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_tumu( @@ -1363,7 +1363,7 @@ vuint32m2_t test_vluxei32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_tumu( @@ -1372,7 +1372,7 @@ vuint32m4_t test_vluxei32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_tumu( @@ -1381,7 +1381,7 @@ vuint32m8_t test_vluxei32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei32_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_tumu( @@ -1390,7 +1390,7 @@ vuint64m1_t test_vluxei32_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei32_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_tumu( @@ -1399,7 +1399,7 @@ vuint64m2_t test_vluxei32_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei32_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_tumu( @@ -1408,7 +1408,7 @@ vuint64m4_t test_vluxei32_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei32_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_mu( @@ -1417,7 +1417,7 @@ vuint64m8_t test_vluxei32_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei32_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_mu( @@ -1426,7 +1426,7 @@ vfloat16mf4_t test_vluxei32_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei32_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_mu( @@ -1435,7 +1435,7 @@ vfloat16mf2_t test_vluxei32_v_f16mf2_mu(vbool32_t mask, 
vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei32_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_mu( @@ -1444,7 +1444,7 @@ vfloat16m1_t test_vluxei32_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei32_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_mu( @@ -1453,7 +1453,7 @@ vfloat16m2_t test_vluxei32_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei32_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_mu( @@ -1462,7 +1462,7 @@ vfloat16m4_t test_vluxei32_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_mu( @@ -1471,7 +1471,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, 
vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_mu( @@ -1480,7 +1480,7 @@ vfloat32m1_t test_vluxei32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_mu( @@ -1489,7 +1489,7 @@ vfloat32m2_t test_vluxei32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_mu( @@ -1498,7 +1498,7 @@ vfloat32m4_t test_vluxei32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_mu( @@ -1507,7 +1507,7 @@ vfloat32m8_t test_vluxei32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei32_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m1_mu(mask, maskedoff, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_mu( @@ -1516,7 +1516,7 @@ vfloat64m1_t test_vluxei32_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei32_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_mu( @@ -1525,7 +1525,7 @@ vfloat64m2_t test_vluxei32_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei32_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_mu( @@ -1534,7 +1534,7 @@ vfloat64m4_t test_vluxei32_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei32_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_f64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_mu( @@ -1543,7 +1543,7 @@ vfloat64m8_t test_vluxei32_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei32_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_mu( @@ -1552,7 +1552,7 @@ vint8mf8_t test_vluxei32_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8mf4_t test_vluxei32_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_mu( @@ -1561,7 +1561,7 @@ vint8mf4_t test_vluxei32_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei32_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_mu( @@ -1570,7 +1570,7 @@ vint8mf2_t test_vluxei32_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei32_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_mu( @@ -1579,7 +1579,7 @@ vint8m1_t test_vluxei32_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei32_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i8m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i8m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_mu( @@ -1588,7 +1588,7 @@ vint8m2_t test_vluxei32_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei32_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i16mf4_mu(mask, maskedoff, 
base, bindex, vl); + return __riscv_vluxei32_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_mu( @@ -1597,7 +1597,7 @@ vint16mf4_t test_vluxei32_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei32_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_mu( @@ -1606,7 +1606,7 @@ vint16mf2_t test_vluxei32_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei32_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_mu( @@ -1615,7 +1615,7 @@ vint16m1_t test_vluxei32_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei32_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_mu( @@ -1624,7 +1624,7 @@ vint16m2_t test_vluxei32_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei32_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_mu( @@ -1633,7 +1633,7 @@ vint16m4_t 
test_vluxei32_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_mu( @@ -1642,7 +1642,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_mu( @@ -1651,7 +1651,7 @@ vint32m1_t test_vluxei32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_mu( @@ -1660,7 +1660,7 @@ vint32m2_t test_vluxei32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_mu( @@ -1669,7 +1669,7 @@ vint32m4_t test_vluxei32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei32_v_i32m8_mu(vbool4_t mask, vint32m8_t 
maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_i32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_mu( @@ -1678,7 +1678,7 @@ vint32m8_t test_vluxei32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei32_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_i64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_mu( @@ -1687,7 +1687,7 @@ vint64m1_t test_vluxei32_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei32_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_i64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_mu( @@ -1696,7 +1696,7 @@ vint64m2_t test_vluxei32_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei32_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_i64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_mu( @@ -1705,7 +1705,7 @@ vint64m4_t test_vluxei32_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei32_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_i64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_i64m8_mu(mask, 
maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_mu( @@ -1714,7 +1714,7 @@ vint64m8_t test_vluxei32_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei32_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_mu( @@ -1723,7 +1723,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei32_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_mu( @@ -1732,7 +1732,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei32_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_mu( @@ -1741,7 +1741,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei32_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_mu( @@ -1750,7 +1750,7 @@ vuint8m1_t test_vluxei32_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, 
const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei32_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u8m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u8m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_mu( @@ -1759,7 +1759,7 @@ vuint8m2_t test_vluxei32_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei32_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_mu( @@ -1768,7 +1768,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei32_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_mu( @@ -1777,7 +1777,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei32_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_mu( @@ -1786,7 +1786,7 @@ vuint16m1_t test_vluxei32_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei32_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, 
size_t vl) { - return vluxei32_v_u16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_mu( @@ -1795,7 +1795,7 @@ vuint16m2_t test_vluxei32_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei32_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_mu( @@ -1804,7 +1804,7 @@ vuint16m4_t test_vluxei32_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_mu( @@ -1813,7 +1813,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_mu( @@ -1822,7 +1822,7 @@ vuint32m1_t test_vluxei32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m2_mu(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_mu( @@ -1831,7 +1831,7 @@ vuint32m2_t test_vluxei32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_mu( @@ -1840,7 +1840,7 @@ vuint32m4_t test_vluxei32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_u32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_mu( @@ -1849,7 +1849,7 @@ vuint32m8_t test_vluxei32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei32_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_u64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_mu( @@ -1858,7 +1858,7 @@ vuint64m1_t test_vluxei32_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei32_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_u64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_mu( @@ -1867,7 +1867,7 @@ vuint64m2_t test_vluxei32_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, cons // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint64m4_t test_vluxei32_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_u64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_mu( @@ -1876,6 +1876,6 @@ vuint64m4_t test_vluxei32_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei32_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_u64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei32_v_u64m8_mu(mask, maskedoff, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei64.c index d8ffa22485f4..c6e2417045fa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei64_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei64_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei64_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t 
test_vluxei64_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei64_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei64_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei64_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei64_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei64_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_tu( @@ -58,7 +58,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei64_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_tu( @@ -67,7 +67,7 @@ vfloat32m1_t test_vluxei64_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei64_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f32m2_tu(maskedoff, base, bindex, vl); + return 
__riscv_vluxei64_v_f32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_tu( @@ -76,7 +76,7 @@ vfloat32m2_t test_vluxei64_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei64_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_tu( @@ -85,7 +85,7 @@ vfloat32m4_t test_vluxei64_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_tu( @@ -94,7 +94,7 @@ vfloat64m1_t test_vluxei64_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_tu( @@ -103,7 +103,7 @@ vfloat64m2_t test_vluxei64_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_tu( @@ -112,7 +112,7 @@ vfloat64m4_t test_vluxei64_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vluxei64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_tu( @@ -121,7 +121,7 @@ vfloat64m8_t test_vluxei64_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei64_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_tu( @@ -130,7 +130,7 @@ vint8mf8_t test_vluxei64_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei64_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_tu( @@ -139,7 +139,7 @@ vint8mf4_t test_vluxei64_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei64_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_tu( @@ -148,7 +148,7 @@ vint8mf2_t test_vluxei64_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei64_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_tu( @@ -157,7 +157,7 @@ vint8m1_t 
test_vluxei64_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei64_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_tu( @@ -166,7 +166,7 @@ vint16mf4_t test_vluxei64_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei64_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_tu( @@ -175,7 +175,7 @@ vint16mf2_t test_vluxei64_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei64_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_tu( @@ -184,7 +184,7 @@ vint16m1_t test_vluxei64_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei64_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_tu( @@ -193,7 +193,7 @@ vint16m2_t test_vluxei64_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei64_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i32mf2_tu(maskedoff, base, bindex, vl); + 
return __riscv_vluxei64_v_i32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_tu( @@ -202,7 +202,7 @@ vint32mf2_t test_vluxei64_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei64_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_tu( @@ -211,7 +211,7 @@ vint32m1_t test_vluxei64_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei64_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_tu( @@ -220,7 +220,7 @@ vint32m2_t test_vluxei64_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei64_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_tu( @@ -229,7 +229,7 @@ vint32m4_t test_vluxei64_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_tu( @@ -238,7 +238,7 @@ vint64m1_t test_vluxei64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei64_v_i64m2_tu(vint64m2_t 
maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_tu( @@ -247,7 +247,7 @@ vint64m2_t test_vluxei64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_tu( @@ -256,7 +256,7 @@ vint64m4_t test_vluxei64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_tu( @@ -265,7 +265,7 @@ vint64m8_t test_vluxei64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei64_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_tu( @@ -274,7 +274,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei64_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_tu( @@ -283,7 +283,7 @@ vuint8mf4_t 
test_vluxei64_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei64_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_tu( @@ -292,7 +292,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei64_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_tu( @@ -301,7 +301,7 @@ vuint8m1_t test_vluxei64_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei64_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_tu( @@ -310,7 +310,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei64_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_tu( @@ -319,7 +319,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei64_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u16m1_tu(maskedoff, base, bindex, vl); + 
return __riscv_vluxei64_v_u16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_tu( @@ -328,7 +328,7 @@ vuint16m1_t test_vluxei64_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei64_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_tu( @@ -337,7 +337,7 @@ vuint16m2_t test_vluxei64_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei64_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_tu( @@ -346,7 +346,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *b // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei64_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_tu( @@ -355,7 +355,7 @@ vuint32m1_t test_vluxei64_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei64_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_tu( @@ -364,7 +364,7 @@ vuint32m2_t test_vluxei64_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vluxei64_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_tu( @@ -373,7 +373,7 @@ vuint32m4_t test_vluxei64_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_tu( @@ -382,7 +382,7 @@ vuint64m1_t test_vluxei64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_tu( @@ -391,7 +391,7 @@ vuint64m2_t test_vluxei64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_tu( @@ -400,7 +400,7 @@ vuint64m4_t test_vluxei64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_tum( @@ 
-409,7 +409,7 @@ vuint64m8_t test_vluxei64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei64_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_tum( @@ -418,7 +418,7 @@ vfloat16mf4_t test_vluxei64_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei64_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_tum( @@ -427,7 +427,7 @@ vfloat16mf2_t test_vluxei64_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei64_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_tum( @@ -436,7 +436,7 @@ vfloat16m1_t test_vluxei64_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei64_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_tum( @@ -445,7 +445,7 @@ vfloat16m2_t test_vluxei64_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32mf2_t test_vluxei64_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_tum( @@ -454,7 +454,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei64_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_tum( @@ -463,7 +463,7 @@ vfloat32m1_t test_vluxei64_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei64_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_tum( @@ -472,7 +472,7 @@ vfloat32m2_t test_vluxei64_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei64_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_tum( @@ -481,7 +481,7 @@ vfloat32m4_t test_vluxei64_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return 
vluxei64_v_f64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_tum( @@ -490,7 +490,7 @@ vfloat64m1_t test_vluxei64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_tum( @@ -499,7 +499,7 @@ vfloat64m2_t test_vluxei64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_tum( @@ -508,7 +508,7 @@ vfloat64m4_t test_vluxei64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_tum( @@ -517,7 +517,7 @@ vfloat64m8_t test_vluxei64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei64_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei64_v_i8mf4_tum( @@ -526,7 +526,7 @@ vint8mf8_t test_vluxei64_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei64_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_tum( @@ -535,7 +535,7 @@ vint8mf4_t test_vluxei64_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei64_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_tum( @@ -544,7 +544,7 @@ vint8mf2_t test_vluxei64_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei64_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_tum( @@ -553,7 +553,7 @@ vint8m1_t test_vluxei64_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei64_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_tum( @@ -562,7 +562,7 @@ vint16mf4_t test_vluxei64_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf2_t test_vluxei64_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_tum( @@ -571,7 +571,7 @@ vint16mf2_t test_vluxei64_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei64_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_tum( @@ -580,7 +580,7 @@ vint16m1_t test_vluxei64_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei64_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_tum( @@ -589,7 +589,7 @@ vint16m2_t test_vluxei64_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei64_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_tum( @@ -598,7 +598,7 @@ vint32mf2_t test_vluxei64_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei64_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return 
vluxei64_v_i32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_tum( @@ -607,7 +607,7 @@ vint32m1_t test_vluxei64_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei64_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_tum( @@ -616,7 +616,7 @@ vint32m2_t test_vluxei64_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei64_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_tum( @@ -625,7 +625,7 @@ vint32m4_t test_vluxei64_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_tum( @@ -634,7 +634,7 @@ vint64m1_t test_vluxei64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei64_v_i64m4_tum( @@ -643,7 +643,7 @@ vint64m2_t test_vluxei64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_tum( @@ -652,7 +652,7 @@ vint64m4_t test_vluxei64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_tum( @@ -661,7 +661,7 @@ vint64m8_t test_vluxei64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei64_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_tum( @@ -670,7 +670,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei64_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_tum( @@ -679,7 +679,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8mf2_t test_vluxei64_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_tum( @@ -688,7 +688,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei64_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_tum( @@ -697,7 +697,7 @@ vuint8m1_t test_vluxei64_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei64_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_tum( @@ -706,7 +706,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei64_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_tum( @@ -715,7 +715,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei64_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return 
vluxei64_v_u16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_tum( @@ -724,7 +724,7 @@ vuint16m1_t test_vluxei64_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei64_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_tum( @@ -733,7 +733,7 @@ vuint16m2_t test_vluxei64_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei64_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_tum( @@ -742,7 +742,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei64_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_tum( @@ -751,7 +751,7 @@ vuint32m1_t test_vluxei64_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei64_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei64_v_u32m4_tum( @@ -760,7 +760,7 @@ vuint32m2_t test_vluxei64_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei64_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_tum( @@ -769,7 +769,7 @@ vuint32m4_t test_vluxei64_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_tum( @@ -778,7 +778,7 @@ vuint64m1_t test_vluxei64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_tum( @@ -787,7 +787,7 @@ vuint64m2_t test_vluxei64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_tum( @@ -796,7 +796,7 @@ vuint64m4_t test_vluxei64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, con // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m8_t test_vluxei64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_tumu( @@ -805,7 +805,7 @@ vuint64m8_t test_vluxei64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei64_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_tumu( @@ -814,7 +814,7 @@ vfloat16mf4_t test_vluxei64_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei64_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_tumu( @@ -823,7 +823,7 @@ vfloat16mf2_t test_vluxei64_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei64_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_tumu( @@ -832,7 +832,7 @@ vfloat16m1_t test_vluxei64_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei64_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t 
bindex, size_t vl) { - return vluxei64_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_tumu( @@ -841,7 +841,7 @@ vfloat16m2_t test_vluxei64_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei64_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_tumu( @@ -850,7 +850,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei64_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_tumu( @@ -859,7 +859,7 @@ vfloat32m1_t test_vluxei64_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei64_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_tumu( @@ -868,7 +868,7 @@ vfloat32m2_t test_vluxei64_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei64_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32m4_tumu(mask, maskedoff, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_tumu( @@ -877,7 +877,7 @@ vfloat32m4_t test_vluxei64_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_tumu( @@ -886,7 +886,7 @@ vfloat64m1_t test_vluxei64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_tumu( @@ -895,7 +895,7 @@ vfloat64m2_t test_vluxei64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_tumu( @@ -904,7 +904,7 @@ vfloat64m4_t test_vluxei64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_tumu( @@ -913,7 +913,7 @@ vfloat64m8_t test_vluxei64_v_f64m8_tumu(vbool8_t mask, 
vfloat64m8_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei64_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_tumu( @@ -922,7 +922,7 @@ vint8mf8_t test_vluxei64_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei64_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_tumu( @@ -931,7 +931,7 @@ vint8mf4_t test_vluxei64_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei64_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_tumu( @@ -940,7 +940,7 @@ vint8mf2_t test_vluxei64_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei64_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_tumu( @@ -949,7 +949,7 @@ vint8m1_t test_vluxei64_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei64_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, 
vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_tumu( @@ -958,7 +958,7 @@ vint16mf4_t test_vluxei64_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei64_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_tumu( @@ -967,7 +967,7 @@ vint16mf2_t test_vluxei64_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei64_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_tumu( @@ -976,7 +976,7 @@ vint16m1_t test_vluxei64_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei64_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_tumu( @@ -985,7 +985,7 @@ vint16m2_t test_vluxei64_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei64_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vluxei64_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_tumu( @@ -994,7 +994,7 @@ vint32mf2_t test_vluxei64_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei64_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_tumu( @@ -1003,7 +1003,7 @@ vint32m1_t test_vluxei64_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei64_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_tumu( @@ -1012,7 +1012,7 @@ vint32m2_t test_vluxei64_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei64_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_tumu( @@ -1021,7 +1021,7 @@ vint32m4_t test_vluxei64_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_tumu( @@ -1030,7 +1030,7 @@ 
vint64m1_t test_vluxei64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_tumu( @@ -1039,7 +1039,7 @@ vint64m2_t test_vluxei64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_tumu( @@ -1048,7 +1048,7 @@ vint64m4_t test_vluxei64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_tumu( @@ -1057,7 +1057,7 @@ vint64m8_t test_vluxei64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei64_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_tumu( @@ -1066,7 +1066,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vluxei64_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_tumu( @@ -1075,7 +1075,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei64_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_tumu( @@ -1084,7 +1084,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei64_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_tumu( @@ -1093,7 +1093,7 @@ vuint8m1_t test_vluxei64_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei64_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_tumu( @@ -1102,7 +1102,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei64_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return 
vluxei64_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_tumu( @@ -1111,7 +1111,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei64_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_tumu( @@ -1120,7 +1120,7 @@ vuint16m1_t test_vluxei64_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei64_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_tumu( @@ -1129,7 +1129,7 @@ vuint16m2_t test_vluxei64_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei64_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_tumu( @@ -1138,7 +1138,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei64_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32m1_tumu(mask, maskedoff, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_tumu( @@ -1147,7 +1147,7 @@ vuint32m1_t test_vluxei64_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei64_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_tumu( @@ -1156,7 +1156,7 @@ vuint32m2_t test_vluxei64_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei64_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_tumu( @@ -1165,7 +1165,7 @@ vuint32m4_t test_vluxei64_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_tumu( @@ -1174,7 +1174,7 @@ vuint64m1_t test_vluxei64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_tumu( @@ -1183,7 +1183,7 @@ vuint64m2_t test_vluxei64_v_u64m2_tumu(vbool32_t mask, 
vuint64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_tumu( @@ -1192,7 +1192,7 @@ vuint64m4_t test_vluxei64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_mu( @@ -1201,7 +1201,7 @@ vuint64m8_t test_vluxei64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei64_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_mu( @@ -1210,7 +1210,7 @@ vfloat16mf4_t test_vluxei64_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei64_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_mu( @@ -1219,7 +1219,7 @@ vfloat16mf2_t test_vluxei64_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei64_v_f16m1_mu(vbool16_t mask, vfloat16m1_t 
maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_mu( @@ -1228,7 +1228,7 @@ vfloat16m1_t test_vluxei64_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei64_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_mu( @@ -1237,7 +1237,7 @@ vfloat16m2_t test_vluxei64_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei64_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_mu( @@ -1246,7 +1246,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei64_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_mu( @@ -1255,7 +1255,7 @@ vfloat32m1_t test_vluxei64_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei64_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f32m2_mu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vluxei64_v_f32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_mu( @@ -1264,7 +1264,7 @@ vfloat32m2_t test_vluxei64_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei64_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_mu( @@ -1273,7 +1273,7 @@ vfloat32m4_t test_vluxei64_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_mu( @@ -1282,7 +1282,7 @@ vfloat64m1_t test_vluxei64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_mu( @@ -1291,7 +1291,7 @@ vfloat64m2_t test_vluxei64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_mu( @@ -1300,7 +1300,7 @@ vfloat64m4_t 
test_vluxei64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_f64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_mu( @@ -1309,7 +1309,7 @@ vfloat64m8_t test_vluxei64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei64_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_mu( @@ -1318,7 +1318,7 @@ vint8mf8_t test_vluxei64_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei64_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_mu( @@ -1327,7 +1327,7 @@ vint8mf4_t test_vluxei64_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei64_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_mu( @@ -1336,7 +1336,7 @@ vint8mf2_t test_vluxei64_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei64_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const 
int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_mu( @@ -1345,7 +1345,7 @@ vint8m1_t test_vluxei64_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei64_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_mu( @@ -1354,7 +1354,7 @@ vint16mf4_t test_vluxei64_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei64_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_mu( @@ -1363,7 +1363,7 @@ vint16mf2_t test_vluxei64_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei64_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_mu( @@ -1372,7 +1372,7 @@ vint16m1_t test_vluxei64_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei64_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i16m2_mu(mask, maskedoff, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_mu( @@ -1381,7 +1381,7 @@ vint16m2_t test_vluxei64_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei64_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_mu( @@ -1390,7 +1390,7 @@ vint32mf2_t test_vluxei64_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei64_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_mu( @@ -1399,7 +1399,7 @@ vint32m1_t test_vluxei64_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei64_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_mu( @@ -1408,7 +1408,7 @@ vint32m2_t test_vluxei64_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei64_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_mu( @@ -1417,7 +1417,7 @@ vint32m4_t test_vluxei64_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const i // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_i64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_mu( @@ -1426,7 +1426,7 @@ vint64m1_t test_vluxei64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_i64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_mu( @@ -1435,7 +1435,7 @@ vint64m2_t test_vluxei64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_i64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_mu( @@ -1444,7 +1444,7 @@ vint64m4_t test_vluxei64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_i64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_i64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_mu( @@ -1453,7 +1453,7 @@ vint64m8_t test_vluxei64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei64_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return 
vluxei64_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_mu( @@ -1462,7 +1462,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei64_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_mu( @@ -1471,7 +1471,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei64_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_mu( @@ -1480,7 +1480,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei64_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_mu( @@ -1489,7 +1489,7 @@ vuint8m1_t test_vluxei64_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei64_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei64_v_u16mf2_mu( @@ -1498,7 +1498,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei64_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_mu( @@ -1507,7 +1507,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei64_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_mu( @@ -1516,7 +1516,7 @@ vuint16m1_t test_vluxei64_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei64_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_mu( @@ -1525,7 +1525,7 @@ vuint16m2_t test_vluxei64_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei64_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_mu( @@ -1534,7 +1534,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m1_t test_vluxei64_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_u32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_mu( @@ -1543,7 +1543,7 @@ vuint32m1_t test_vluxei64_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei64_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_mu( @@ -1552,7 +1552,7 @@ vuint32m2_t test_vluxei64_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei64_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_mu( @@ -1561,7 +1561,7 @@ vuint32m4_t test_vluxei64_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_u64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_mu( @@ -1570,7 +1570,7 @@ vuint64m1_t test_vluxei64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return 
vluxei64_v_u64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_mu( @@ -1579,7 +1579,7 @@ vuint64m2_t test_vluxei64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_u64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_mu( @@ -1588,6 +1588,6 @@ vuint64m4_t test_vluxei64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_u64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei64_v_u64m8_mu(mask, maskedoff, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei8.c index ee0b84c2cac5..679b80ebf92b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxei8.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei8_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vluxei8_v_f16mf4_tu(vfloat16mf4_t maskedoff, const _Float16 * // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei8_v_f16mf2_tu(vfloat16mf2_t 
maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vluxei8_v_f16mf2_tu(vfloat16mf2_t maskedoff, const _Float16 * // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei8_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vluxei8_v_f16m1_tu(vfloat16m1_t maskedoff, const _Float16 *bas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei8_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vluxei8_v_f16m2_tu(vfloat16m2_t maskedoff, const _Float16 *bas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei8_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vluxei8_v_f16m4_tu(vfloat16m4_t maskedoff, const _Float16 *bas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vluxei8_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_f16m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t 
test_vluxei8_v_f16m8_tu(vfloat16m8_t maskedoff, const _Float16 *bas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei8_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float *bas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei8_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vluxei8_v_f32m1_tu(vfloat32m1_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei8_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vluxei8_v_f32m2_tu(vfloat32m2_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei8_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vluxei8_v_f32m4_tu(vfloat32m4_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei8_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f32m8_tu(maskedoff, base, bindex, vl); + return 
__riscv_vluxei8_v_f32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vluxei8_v_f32m8_tu(vfloat32m8_t maskedoff, const float *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei8_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vluxei8_v_f64m1_tu(vfloat64m1_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei8_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vluxei8_v_f64m2_tu(vfloat64m2_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei8_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vluxei8_v_f64m4_tu(vfloat64m4_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei8_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_tu( @@ -148,7 +148,7 @@ vfloat64m8_t test_vluxei8_v_f64m8_tu(vfloat64m8_t maskedoff, const double *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei8_v_i8mf8_tu(vint8mf8_t maskedoff, const 
int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_tu( @@ -157,7 +157,7 @@ vint8mf8_t test_vluxei8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_tu( @@ -166,7 +166,7 @@ vint8mf4_t test_vluxei8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_tu( @@ -175,7 +175,7 @@ vint8mf2_t test_vluxei8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_tu( @@ -184,7 +184,7 @@ vint8m1_t test_vluxei8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i8m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_tu( @@ -193,7 +193,7 @@ vint8m2_t test_vluxei8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint8 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vluxei8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_i8m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_tu( @@ -202,7 +202,7 @@ vint8m4_t test_vluxei8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vluxei8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_i8m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_tu( @@ -211,7 +211,7 @@ vint8m8_t test_vluxei8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei8_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_tu( @@ -220,7 +220,7 @@ vint16mf4_t test_vluxei8_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei8_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_tu( @@ -229,7 +229,7 @@ vint16mf2_t test_vluxei8_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei8_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_tu( 
@@ -238,7 +238,7 @@ vint16m1_t test_vluxei8_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei8_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_tu( @@ -247,7 +247,7 @@ vint16m2_t test_vluxei8_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei8_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i16m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_tu( @@ -256,7 +256,7 @@ vint16m4_t test_vluxei8_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vluxei8_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_i16m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_tu( @@ -265,7 +265,7 @@ vint16m8_t test_vluxei8_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei8_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_tu( @@ -274,7 +274,7 @@ vint32mf2_t test_vluxei8_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei8_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i32m1_tu(maskedoff, base, bindex, vl); + 
return __riscv_vluxei8_v_i32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_tu( @@ -283,7 +283,7 @@ vint32m1_t test_vluxei8_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei8_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_tu( @@ -292,7 +292,7 @@ vint32m2_t test_vluxei8_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei8_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_tu( @@ -301,7 +301,7 @@ vint32m4_t test_vluxei8_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei8_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_tu( @@ -310,7 +310,7 @@ vint32m8_t test_vluxei8_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei8_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_tu( @@ -319,7 +319,7 @@ vint64m1_t test_vluxei8_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei8_v_i64m2_tu(vint64m2_t maskedoff, const 
int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_tu( @@ -328,7 +328,7 @@ vint64m2_t test_vluxei8_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei8_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_tu( @@ -337,7 +337,7 @@ vint64m4_t test_vluxei8_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei8_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_tu( @@ -346,7 +346,7 @@ vint64m8_t test_vluxei8_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u8mf8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8mf8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_tu( @@ -355,7 +355,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u8mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_tu( @@ -364,7 +364,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t 
*base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u8mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_tu( @@ -373,7 +373,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u8m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_tu( @@ -382,7 +382,7 @@ vuint8m1_t test_vluxei8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u8m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_tu( @@ -391,7 +391,7 @@ vuint8m2_t test_vluxei8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vluxei8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u8m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_tu( @@ -400,7 +400,7 @@ vuint8m4_t test_vluxei8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vluxei8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_u8m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_tu( @@ 
-409,7 +409,7 @@ vuint8m8_t test_vluxei8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei8_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u16mf4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16mf4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_tu( @@ -418,7 +418,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei8_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u16mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_tu( @@ -427,7 +427,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei8_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u16m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_tu( @@ -436,7 +436,7 @@ vuint16m1_t test_vluxei8_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei8_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u16m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_tu( @@ -445,7 +445,7 @@ vuint16m2_t test_vluxei8_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei8_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u16m4_tu(maskedoff, base, 
bindex, vl); + return __riscv_vluxei8_v_u16m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_tu( @@ -454,7 +454,7 @@ vuint16m4_t test_vluxei8_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vluxei8_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u16m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_tu( @@ -463,7 +463,7 @@ vuint16m8_t test_vluxei8_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei8_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u32mf2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32mf2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_tu( @@ -472,7 +472,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *ba // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei8_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u32m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_tu( @@ -481,7 +481,7 @@ vuint32m1_t test_vluxei8_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei8_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u32m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_tu( @@ -490,7 +490,7 @@ vuint32m2_t test_vluxei8_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vluxei8_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u32m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_tu( @@ -499,7 +499,7 @@ vuint32m4_t test_vluxei8_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei8_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u32m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_tu( @@ -508,7 +508,7 @@ vuint32m8_t test_vluxei8_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei8_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u64m1_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m1_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_tu( @@ -517,7 +517,7 @@ vuint64m1_t test_vluxei8_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei8_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u64m2_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m2_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_tu( @@ -526,7 +526,7 @@ vuint64m2_t test_vluxei8_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei8_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u64m4_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m4_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_tu( @@ -535,7 +535,7 @@ 
vuint64m4_t test_vluxei8_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei8_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u64m8_tu(maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m8_tu(maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_tum( @@ -544,7 +544,7 @@ vuint64m8_t test_vluxei8_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei8_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_tum( @@ -553,7 +553,7 @@ vfloat16mf4_t test_vluxei8_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei8_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_tum( @@ -562,7 +562,7 @@ vfloat16mf2_t test_vluxei8_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei8_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_tum( @@ -571,7 +571,7 @@ vfloat16m1_t test_vluxei8_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei8_v_f16m2_tum(vbool8_t mask, vfloat16m2_t 
maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_tum( @@ -580,7 +580,7 @@ vfloat16m2_t test_vluxei8_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei8_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_tum( @@ -589,7 +589,7 @@ vfloat16m4_t test_vluxei8_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vluxei8_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_f16m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_tum( @@ -598,7 +598,7 @@ vfloat16m8_t test_vluxei8_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei8_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_tum( @@ -607,7 +607,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei8_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f32m1_tum(mask, maskedoff, base, bindex, vl); + return 
__riscv_vluxei8_v_f32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_tum( @@ -616,7 +616,7 @@ vfloat32m1_t test_vluxei8_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei8_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_tum( @@ -625,7 +625,7 @@ vfloat32m2_t test_vluxei8_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei8_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_tum( @@ -634,7 +634,7 @@ vfloat32m4_t test_vluxei8_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei8_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_tum( @@ -643,7 +643,7 @@ vfloat32m8_t test_vluxei8_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei8_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_tum( @@ -652,7 +652,7 @@ vfloat64m1_t test_vluxei8_v_f64m1_tum(vbool64_t 
mask, vfloat64m1_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei8_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_tum( @@ -661,7 +661,7 @@ vfloat64m2_t test_vluxei8_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei8_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_tum( @@ -670,7 +670,7 @@ vfloat64m4_t test_vluxei8_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei8_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_tum( @@ -679,7 +679,7 @@ vfloat64m8_t test_vluxei8_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_tum( @@ -688,7 +688,7 @@ vint8mf8_t test_vluxei8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, 
size_t vl) { - return vluxei8_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_tum( @@ -697,7 +697,7 @@ vint8mf4_t test_vluxei8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_tum( @@ -706,7 +706,7 @@ vint8mf2_t test_vluxei8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_tum( @@ -715,7 +715,7 @@ vint8m1_t test_vluxei8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i8m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_tum( @@ -724,7 +724,7 @@ vint8m2_t test_vluxei8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vluxei8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_i8m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_tum( @@ -733,7 +733,7 
@@ vint8m4_t test_vluxei8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vluxei8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_i8m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_tum( @@ -742,7 +742,7 @@ vint8m8_t test_vluxei8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei8_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_tum( @@ -751,7 +751,7 @@ vint16mf4_t test_vluxei8_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei8_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_tum( @@ -760,7 +760,7 @@ vint16mf2_t test_vluxei8_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei8_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_tum( @@ -769,7 +769,7 @@ vint16m1_t test_vluxei8_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei8_v_i16m2_tum(vbool8_t mask, vint16m2_t 
maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_tum( @@ -778,7 +778,7 @@ vint16m2_t test_vluxei8_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei8_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_tum( @@ -787,7 +787,7 @@ vint16m4_t test_vluxei8_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vluxei8_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_i16m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_tum( @@ -796,7 +796,7 @@ vint16m8_t test_vluxei8_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei8_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_tum( @@ -805,7 +805,7 @@ vint32mf2_t test_vluxei8_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei8_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i32m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m1_tum(mask, maskedoff, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_tum( @@ -814,7 +814,7 @@ vint32m1_t test_vluxei8_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei8_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_tum( @@ -823,7 +823,7 @@ vint32m2_t test_vluxei8_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei8_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_tum( @@ -832,7 +832,7 @@ vint32m4_t test_vluxei8_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei8_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_tum( @@ -841,7 +841,7 @@ vint32m8_t test_vluxei8_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei8_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_tum( @@ -850,7 +850,7 @@ vint64m1_t test_vluxei8_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m2_t test_vluxei8_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_tum( @@ -859,7 +859,7 @@ vint64m2_t test_vluxei8_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei8_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_tum( @@ -868,7 +868,7 @@ vint64m4_t test_vluxei8_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei8_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_tum( @@ -877,7 +877,7 @@ vint64m8_t test_vluxei8_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_tum( @@ -886,7 +886,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u8mf4_tum(mask, maskedoff, 
base, bindex, vl); + return __riscv_vluxei8_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_tum( @@ -895,7 +895,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_tum( @@ -904,7 +904,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u8m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_tum( @@ -913,7 +913,7 @@ vuint8m1_t test_vluxei8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u8m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_tum( @@ -922,7 +922,7 @@ vuint8m2_t test_vluxei8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vluxei8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u8m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_tum( @@ -931,7 +931,7 @@ vuint8m4_t 
test_vluxei8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vluxei8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_u8m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_tum( @@ -940,7 +940,7 @@ vuint8m8_t test_vluxei8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_tum( @@ -949,7 +949,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_tum( @@ -958,7 +958,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u16m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_tum( @@ -967,7 +967,7 @@ vuint16m1_t test_vluxei8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei8_v_u16m2_tum(vbool8_t mask, vuint16m2_t 
maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u16m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_tum( @@ -976,7 +976,7 @@ vuint16m2_t test_vluxei8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u16m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_tum( @@ -985,7 +985,7 @@ vuint16m4_t test_vluxei8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vluxei8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u16m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_tum( @@ -994,7 +994,7 @@ vuint16m8_t test_vluxei8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_tum( @@ -1003,7 +1003,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u32m1_tum(mask, maskedoff, base, bindex, vl); + return 
__riscv_vluxei8_v_u32m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_tum( @@ -1012,7 +1012,7 @@ vuint32m1_t test_vluxei8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u32m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_tum( @@ -1021,7 +1021,7 @@ vuint32m2_t test_vluxei8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u32m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_tum( @@ -1030,7 +1030,7 @@ vuint32m4_t test_vluxei8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u32m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_tum( @@ -1039,7 +1039,7 @@ vuint32m8_t test_vluxei8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u64m1_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m1_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_tum( @@ -1048,7 +1048,7 @@ vuint64m1_t 
test_vluxei8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u64m2_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m2_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_tum( @@ -1057,7 +1057,7 @@ vuint64m2_t test_vluxei8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u64m4_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m4_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_tum( @@ -1066,7 +1066,7 @@ vuint64m4_t test_vluxei8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u64m8_tum(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m8_tum(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_tumu( @@ -1075,7 +1075,7 @@ vuint64m8_t test_vluxei8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei8_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_tumu( @@ -1084,7 +1084,7 @@ vfloat16mf4_t test_vluxei8_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei8_v_f16mf2_tumu(vbool32_t 
mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_tumu( @@ -1093,7 +1093,7 @@ vfloat16mf2_t test_vluxei8_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei8_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_tumu( @@ -1102,7 +1102,7 @@ vfloat16m1_t test_vluxei8_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei8_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_tumu( @@ -1111,7 +1111,7 @@ vfloat16m2_t test_vluxei8_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei8_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_tumu( @@ -1120,7 +1120,7 @@ vfloat16m4_t test_vluxei8_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vluxei8_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_f16m8_tumu(mask, maskedoff, base, bindex, 
vl); + return __riscv_vluxei8_v_f16m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_tumu( @@ -1129,7 +1129,7 @@ vfloat16m8_t test_vluxei8_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei8_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_tumu( @@ -1138,7 +1138,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vluxei8_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_tumu( @@ -1147,7 +1147,7 @@ vfloat32m1_t test_vluxei8_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei8_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_tumu( @@ -1156,7 +1156,7 @@ vfloat32m2_t test_vluxei8_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei8_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_tumu( @@ -1165,7 +1165,7 @@ 
vfloat32m4_t test_vluxei8_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei8_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_tumu( @@ -1174,7 +1174,7 @@ vfloat32m8_t test_vluxei8_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei8_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_tumu( @@ -1183,7 +1183,7 @@ vfloat64m1_t test_vluxei8_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei8_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_tumu( @@ -1192,7 +1192,7 @@ vfloat64m2_t test_vluxei8_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei8_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_tumu( @@ -1201,7 +1201,7 @@ vfloat64m4_t test_vluxei8_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, c // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vluxei8_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_tumu( @@ -1210,7 +1210,7 @@ vfloat64m8_t test_vluxei8_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_tumu( @@ -1219,7 +1219,7 @@ vint8mf8_t test_vluxei8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_tumu( @@ -1228,7 +1228,7 @@ vint8mf4_t test_vluxei8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_tumu( @@ -1237,7 +1237,7 @@ vint8mf2_t test_vluxei8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i8m1_tumu(mask, maskedoff, base, 
bindex, vl); + return __riscv_vluxei8_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_tumu( @@ -1246,7 +1246,7 @@ vint8m1_t test_vluxei8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_tumu( @@ -1255,7 +1255,7 @@ vint8m2_t test_vluxei8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vluxei8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_i8m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_tumu( @@ -1264,7 +1264,7 @@ vint8m4_t test_vluxei8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vluxei8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_i8m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_tumu( @@ -1273,7 +1273,7 @@ vint8m8_t test_vluxei8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei8_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_tumu( @@ -1282,7 +1282,7 @@ vint16mf4_t 
test_vluxei8_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei8_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_tumu( @@ -1291,7 +1291,7 @@ vint16mf2_t test_vluxei8_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei8_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_tumu( @@ -1300,7 +1300,7 @@ vint16m1_t test_vluxei8_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei8_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_tumu( @@ -1309,7 +1309,7 @@ vint16m2_t test_vluxei8_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei8_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_tumu( @@ -1318,7 +1318,7 @@ vint16m4_t test_vluxei8_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vluxei8_v_i16m8_tumu(vbool2_t mask, 
vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_i16m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_tumu( @@ -1327,7 +1327,7 @@ vint16m8_t test_vluxei8_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei8_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_tumu( @@ -1336,7 +1336,7 @@ vint32mf2_t test_vluxei8_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei8_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_tumu( @@ -1345,7 +1345,7 @@ vint32m1_t test_vluxei8_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei8_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_tumu( @@ -1354,7 +1354,7 @@ vint32m2_t test_vluxei8_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei8_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vluxei8_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_tumu( @@ -1363,7 +1363,7 @@ vint32m4_t test_vluxei8_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei8_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_tumu( @@ -1372,7 +1372,7 @@ vint32m8_t test_vluxei8_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei8_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_tumu( @@ -1381,7 +1381,7 @@ vint64m1_t test_vluxei8_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei8_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_tumu( @@ -1390,7 +1390,7 @@ vint64m2_t test_vluxei8_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei8_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_tumu( @@ -1399,7 +1399,7 @@ vint64m4_t 
test_vluxei8_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei8_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_tumu( @@ -1408,7 +1408,7 @@ vint64m8_t test_vluxei8_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_tumu( @@ -1417,7 +1417,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vluxei8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_tumu( @@ -1426,7 +1426,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_tumu( @@ -1435,7 +1435,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei8_v_u8m1_tumu(vbool8_t mask, 
vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_tumu( @@ -1444,7 +1444,7 @@ vuint8m1_t test_vluxei8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_tumu( @@ -1453,7 +1453,7 @@ vuint8m2_t test_vluxei8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vluxei8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u8m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_tumu( @@ -1462,7 +1462,7 @@ vuint8m4_t test_vluxei8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vluxei8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_u8m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_tumu( @@ -1471,7 +1471,7 @@ vuint8m8_t test_vluxei8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vluxei8_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_tumu( @@ -1480,7 +1480,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_tumu( @@ -1489,7 +1489,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_tumu( @@ -1498,7 +1498,7 @@ vuint16m1_t test_vluxei8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_tumu( @@ -1507,7 +1507,7 @@ vuint16m2_t test_vluxei8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_tumu( @@ -1516,7 +1516,7 @@ vuint16m4_t 
test_vluxei8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vluxei8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u16m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_tumu( @@ -1525,7 +1525,7 @@ vuint16m8_t test_vluxei8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_tumu( @@ -1534,7 +1534,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_tumu( @@ -1543,7 +1543,7 @@ vuint32m1_t test_vluxei8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_tumu( @@ -1552,7 +1552,7 @@ vuint32m2_t test_vluxei8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vluxei8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_tumu( @@ -1561,7 +1561,7 @@ vuint32m4_t test_vluxei8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_tumu( @@ -1570,7 +1570,7 @@ vuint32m8_t test_vluxei8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_tumu( @@ -1579,7 +1579,7 @@ vuint64m1_t test_vluxei8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_tumu( @@ -1588,7 +1588,7 @@ vuint64m2_t test_vluxei8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u64m4_tumu(mask, 
maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_tumu( @@ -1597,7 +1597,7 @@ vuint64m4_t test_vluxei8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_mu( @@ -1606,7 +1606,7 @@ vuint64m8_t test_vluxei8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vluxei8_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_mu( @@ -1615,7 +1615,7 @@ vfloat16mf4_t test_vluxei8_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vluxei8_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_mu( @@ -1624,7 +1624,7 @@ vfloat16mf2_t test_vluxei8_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vluxei8_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_mu( @@ 
-1633,7 +1633,7 @@ vfloat16m1_t test_vluxei8_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vluxei8_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_mu( @@ -1642,7 +1642,7 @@ vfloat16m2_t test_vluxei8_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vluxei8_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_mu( @@ -1651,7 +1651,7 @@ vfloat16m4_t test_vluxei8_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vluxei8_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_f16m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f16m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_mu( @@ -1660,7 +1660,7 @@ vfloat16m8_t test_vluxei8_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vluxei8_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_mu( @@ -1669,7 +1669,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vluxei8_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_mu( @@ -1678,7 +1678,7 @@ vfloat32m1_t test_vluxei8_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vluxei8_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_mu( @@ -1687,7 +1687,7 @@ vfloat32m2_t test_vluxei8_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vluxei8_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_mu( @@ -1696,7 +1696,7 @@ vfloat32m4_t test_vluxei8_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vluxei8_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_f32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_mu( @@ -1705,7 +1705,7 @@ vfloat32m8_t test_vluxei8_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vluxei8_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f64m1_mu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vluxei8_v_f64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_mu( @@ -1714,7 +1714,7 @@ vfloat64m1_t test_vluxei8_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vluxei8_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_mu( @@ -1723,7 +1723,7 @@ vfloat64m2_t test_vluxei8_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vluxei8_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_mu( @@ -1732,7 +1732,7 @@ vfloat64m4_t test_vluxei8_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, con // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vluxei8_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_f64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_mu( @@ -1741,7 +1741,7 @@ vfloat64m8_t test_vluxei8_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vluxei8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_mu( @@ -1750,7 +1750,7 @@ vint8mf8_t test_vluxei8_v_i8mf8_mu(vbool64_t mask, 
vint8mf8_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vluxei8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_mu( @@ -1759,7 +1759,7 @@ vint8mf4_t test_vluxei8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vluxei8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_mu( @@ -1768,7 +1768,7 @@ vint8mf2_t test_vluxei8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vluxei8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_mu( @@ -1777,7 +1777,7 @@ vint8m1_t test_vluxei8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vluxei8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i8m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_mu( @@ -1786,7 +1786,7 @@ vint8m2_t test_vluxei8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vluxei8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return 
vluxei8_v_i8m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_mu( @@ -1795,7 +1795,7 @@ vint8m4_t test_vluxei8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vluxei8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_i8m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i8m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_mu( @@ -1804,7 +1804,7 @@ vint8m8_t test_vluxei8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vluxei8_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_mu( @@ -1813,7 +1813,7 @@ vint16mf4_t test_vluxei8_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vluxei8_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_mu( @@ -1822,7 +1822,7 @@ vint16mf2_t test_vluxei8_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vluxei8_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_mu( @@ -1831,7 +1831,7 
@@ vint16m1_t test_vluxei8_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vluxei8_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_mu( @@ -1840,7 +1840,7 @@ vint16m2_t test_vluxei8_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vluxei8_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_mu( @@ -1849,7 +1849,7 @@ vint16m4_t test_vluxei8_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vluxei8_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_i16m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i16m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_mu( @@ -1858,7 +1858,7 @@ vint16m8_t test_vluxei8_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vluxei8_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_mu( @@ -1867,7 +1867,7 @@ vint32mf2_t test_vluxei8_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, cons // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vluxei8_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, 
const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_mu( @@ -1876,7 +1876,7 @@ vint32m1_t test_vluxei8_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vluxei8_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_mu( @@ -1885,7 +1885,7 @@ vint32m2_t test_vluxei8_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vluxei8_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_mu( @@ -1894,7 +1894,7 @@ vint32m4_t test_vluxei8_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vluxei8_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_i32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_mu( @@ -1903,7 +1903,7 @@ vint32m8_t test_vluxei8_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vluxei8_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_i64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m1_mu(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_mu( @@ -1912,7 +1912,7 @@ vint64m1_t test_vluxei8_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vluxei8_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_i64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_mu( @@ -1921,7 +1921,7 @@ vint64m2_t test_vluxei8_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vluxei8_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_i64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_mu( @@ -1930,7 +1930,7 @@ vint64m4_t test_vluxei8_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const i // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vluxei8_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_i64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_i64m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_mu( @@ -1939,7 +1939,7 @@ vint64m8_t test_vluxei8_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const in // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vluxei8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_mu( @@ -1948,7 +1948,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vluxei8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_mu( @@ -1957,7 +1957,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vluxei8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_mu( @@ -1966,7 +1966,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vluxei8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u8m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_mu( @@ -1975,7 +1975,7 @@ vuint8m1_t test_vluxei8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vluxei8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u8m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_mu( @@ -1984,7 +1984,7 @@ vuint8m2_t test_vluxei8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vluxei8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u8m4_mu(mask, maskedoff, base, bindex, vl); + return 
__riscv_vluxei8_v_u8m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_mu( @@ -1993,7 +1993,7 @@ vuint8m4_t test_vluxei8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vluxei8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8_v_u8m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u8m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_mu( @@ -2002,7 +2002,7 @@ vuint8m8_t test_vluxei8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vluxei8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_mu( @@ -2011,7 +2011,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vluxei8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_mu( @@ -2020,7 +2020,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vluxei8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u16m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_mu( @@ -2029,7 +2029,7 @@ vuint16m1_t test_vluxei8_v_u16m1_mu(vbool16_t 
mask, vuint16m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vluxei8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u16m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_mu( @@ -2038,7 +2038,7 @@ vuint16m2_t test_vluxei8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vluxei8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u16m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_mu( @@ -2047,7 +2047,7 @@ vuint16m4_t test_vluxei8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vluxei8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_u16m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u16m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_mu( @@ -2056,7 +2056,7 @@ vuint16m8_t test_vluxei8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vluxei8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_mu( @@ -2065,7 +2065,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, co // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vluxei8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t 
bindex, size_t vl) { - return vluxei8_v_u32m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_mu( @@ -2074,7 +2074,7 @@ vuint32m1_t test_vluxei8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vluxei8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u32m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_mu( @@ -2083,7 +2083,7 @@ vuint32m2_t test_vluxei8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vluxei8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u32m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_mu( @@ -2092,7 +2092,7 @@ vuint32m4_t test_vluxei8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vluxei8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8_v_u32m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u32m8_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_mu( @@ -2101,7 +2101,7 @@ vuint32m8_t test_vluxei8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vluxei8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_u64m1_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m1_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxei8_v_u64m2_mu( @@ -2110,7 +2110,7 @@ vuint64m1_t test_vluxei8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vluxei8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_u64m2_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m2_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_mu( @@ -2119,7 +2119,7 @@ vuint64m2_t test_vluxei8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vluxei8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_u64m4_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m4_mu(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_mu( @@ -2128,6 +2128,6 @@ vuint64m4_t test_vluxei8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vluxei8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_u64m8_mu(mask, maskedoff, base, bindex, vl); + return __riscv_vluxei8_v_u64m8_mu(mask, maskedoff, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei16.c index 506c053a1a7a..893c6d0e6411 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei16.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) 
{ - return vluxseg2ei16_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_tu( @@ -30,7 +30,7 @@ void test_vluxseg2ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_tu( @@ -43,7 +43,7 @@ void test_vluxseg2ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_tu( @@ -56,7 +56,7 @@ void test_vluxseg2ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_tu( @@ -69,7 +69,7 @@ void test_vluxseg2ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_tu( @@ -82,7 +82,7 @@ void test_vluxseg2ei16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_tu( @@ -95,7 +95,7 @@ void test_vluxseg2ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_tu( @@ -108,7 +108,7 @@ void test_vluxseg2ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg2ei16_v_f32m4_tu( @@ -121,7 +121,7 @@ void test_vluxseg2ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_tu( @@ -134,7 +134,7 @@ void test_vluxseg2ei16_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_tu( @@ -147,7 +147,7 @@ void test_vluxseg2ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_tu( @@ -160,7 +160,7 @@ void test_vluxseg2ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m4_tu(v0, v1, maskedoff0, 
maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_tu( @@ -173,7 +173,7 @@ void test_vluxseg2ei16_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_tu( @@ -186,7 +186,7 @@ void test_vluxseg2ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_tu( @@ -199,7 +199,7 @@ void test_vluxseg2ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_tu( @@ -212,7 +212,7 @@ void test_vluxseg2ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t 
maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_tu( @@ -225,7 +225,7 @@ void test_vluxseg2ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedo // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_tu( @@ -238,7 +238,7 @@ void test_vluxseg2ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedo // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_tu( @@ -251,7 +251,7 @@ void test_vluxseg2ei16_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedo // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_tu( @@ -264,7 +264,7 @@ void test_vluxseg2ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret 
void // void test_vluxseg2ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_tu( @@ -277,7 +277,7 @@ void test_vluxseg2ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_tu( @@ -290,7 +290,7 @@ void test_vluxseg2ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_tu( @@ -303,7 +303,7 @@ void test_vluxseg2ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_tu( 
@@ -316,7 +316,7 @@ void test_vluxseg2ei16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_tu( @@ -329,7 +329,7 @@ void test_vluxseg2ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_tu( @@ -342,7 +342,7 @@ void test_vluxseg2ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_tu( @@ -355,7 +355,7 @@ void test_vluxseg2ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei16_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_tu( @@ -368,7 +368,7 @@ void test_vluxseg2ei16_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_tu( @@ -381,7 +381,7 @@ void test_vluxseg2ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_tu( @@ -394,7 +394,7 @@ void test_vluxseg2ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_tu( @@ -407,7 +407,7 @@ void test_vluxseg2ei16_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, 
vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_tu( @@ -420,7 +420,7 @@ void test_vluxseg2ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_tu( @@ -433,7 +433,7 @@ void test_vluxseg2ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_tu( @@ -446,7 +446,7 @@ void test_vluxseg2ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_tu( @@ -459,7 +459,7 @@ void test_vluxseg2ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t mask // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_tu( @@ -472,7 +472,7 @@ void test_vluxseg2ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_tu( @@ -485,7 +485,7 @@ void test_vluxseg2ei16_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_tu( @@ -498,7 +498,7 @@ void test_vluxseg2ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_tu( 
@@ -511,7 +511,7 @@ void test_vluxseg2ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_tu( @@ -524,7 +524,7 @@ void test_vluxseg2ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_tu( @@ -537,7 +537,7 @@ void test_vluxseg2ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_tu( @@ -550,7 +550,7 @@ void test_vluxseg2ei16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + 
return __riscv_vluxseg2ei16_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_tu( @@ -563,7 +563,7 @@ void test_vluxseg2ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_tu( @@ -576,7 +576,7 @@ void test_vluxseg2ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_tu( @@ -589,7 +589,7 @@ void test_vluxseg2ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_tu( @@ -602,7 +602,7 @@ void test_vluxseg2ei16_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const 
uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_tu( @@ -615,7 +615,7 @@ void test_vluxseg2ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_tu( @@ -628,7 +628,7 @@ void test_vluxseg2ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_tum( @@ -641,7 +641,7 @@ void test_vluxseg2ei16_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_tum( @@ -654,7 +654,7 @@ void test_vluxseg2ei16_v_f16mf4_tum(vfloat16mf4_t 
*v0, vfloat16mf4_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_tum( @@ -667,7 +667,7 @@ void test_vluxseg2ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_tum( @@ -680,7 +680,7 @@ void test_vluxseg2ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_tum( @@ -693,7 +693,7 @@ void test_vluxseg2ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return 
vluxseg2ei16_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_tum( @@ -706,7 +706,7 @@ void test_vluxseg2ei16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_tum( @@ -719,7 +719,7 @@ void test_vluxseg2ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_tum( @@ -732,7 +732,7 @@ void test_vluxseg2ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_tum( @@ -745,7 +745,7 @@ void 
test_vluxseg2ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_tum( @@ -758,7 +758,7 @@ void test_vluxseg2ei16_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_tum( @@ -771,7 +771,7 @@ void test_vluxseg2ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_tum( @@ -784,7 +784,7 @@ void test_vluxseg2ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) 
{ - return vluxseg2ei16_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_tum( @@ -797,7 +797,7 @@ void test_vluxseg2ei16_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_tum( @@ -810,7 +810,7 @@ void test_vluxseg2ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_tum( @@ -823,7 +823,7 @@ void test_vluxseg2ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_tum( @@ -836,7 +836,7 @@ void test_vluxseg2ei16_v_i8mf2_tum(vint8mf2_t *v0, 
vint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_tum( @@ -849,7 +849,7 @@ void test_vluxseg2ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_tum( @@ -862,7 +862,7 @@ void test_vluxseg2ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_tum( @@ -875,7 +875,7 @@ void test_vluxseg2ei16_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + 
return __riscv_vluxseg2ei16_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_tum( @@ -888,7 +888,7 @@ void test_vluxseg2ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_tum( @@ -901,7 +901,7 @@ void test_vluxseg2ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_tum( @@ -914,7 +914,7 @@ void test_vluxseg2ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_tum( @@ -927,7 +927,7 @@ void test_vluxseg2ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_tum( @@ -940,7 +940,7 @@ void test_vluxseg2ei16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_tum( @@ -953,7 +953,7 @@ void test_vluxseg2ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_tum( @@ -966,7 +966,7 @@ void test_vluxseg2ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei16_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_tum( @@ -979,7 +979,7 @@ void test_vluxseg2ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_tum( @@ -992,7 +992,7 @@ void test_vluxseg2ei16_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_tum( @@ -1005,7 +1005,7 @@ void test_vluxseg2ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_tum( @@ -1018,7 +1018,7 @@ void test_vluxseg2ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei16_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_tum( @@ -1031,7 +1031,7 @@ void test_vluxseg2ei16_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_tum( @@ -1044,7 +1044,7 @@ void test_vluxseg2ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_tum( @@ -1057,7 +1057,7 @@ void test_vluxseg2ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei16_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_tum( @@ -1070,7 +1070,7 @@ void test_vluxseg2ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_tum( @@ -1083,7 +1083,7 @@ void test_vluxseg2ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_tum( @@ -1096,7 +1096,7 @@ void test_vluxseg2ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_tum( @@ -1109,7 +1109,7 @@ void test_vluxseg2ei16_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_tum( @@ -1122,7 +1122,7 @@ void test_vluxseg2ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_tum( @@ -1135,7 +1135,7 @@ void test_vluxseg2ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_tum( @@ -1148,7 +1148,7 @@ void test_vluxseg2ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + 
return __riscv_vluxseg2ei16_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_tum( @@ -1161,7 +1161,7 @@ void test_vluxseg2ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_tum( @@ -1174,7 +1174,7 @@ void test_vluxseg2ei16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_tum( @@ -1187,7 +1187,7 @@ void test_vluxseg2ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_tum( @@ -1200,7 +1200,7 @@ void test_vluxseg2ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // 
void test_vluxseg2ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_tum( @@ -1213,7 +1213,7 @@ void test_vluxseg2ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_tum( @@ -1226,7 +1226,7 @@ void test_vluxseg2ei16_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_tum( @@ -1239,7 +1239,7 @@ void test_vluxseg2ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei16_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_tum( @@ -1252,7 +1252,7 @@ void test_vluxseg2ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_tumu( @@ -1265,7 +1265,7 @@ void test_vluxseg2ei16_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_tumu( @@ -1278,7 +1278,7 @@ void test_vluxseg2ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_tumu( @@ -1291,7 +1291,7 @@ void test_vluxseg2ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_tumu( @@ -1304,7 +1304,7 @@ void test_vluxseg2ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_tumu( @@ -1317,7 +1317,7 @@ void test_vluxseg2ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_tumu( @@ -1330,7 +1330,7 @@ void test_vluxseg2ei16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f32mf2_tumu(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_tumu( @@ -1343,7 +1343,7 @@ void test_vluxseg2ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_tumu( @@ -1356,7 +1356,7 @@ void test_vluxseg2ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_tumu( @@ -1369,7 +1369,7 @@ void test_vluxseg2ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_tumu( @@ -1382,7 +1382,7 @@ void test_vluxseg2ei16_v_f32m4_tumu(vfloat32m4_t 
*v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_tumu( @@ -1395,7 +1395,7 @@ void test_vluxseg2ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_tumu( @@ -1408,7 +1408,7 @@ void test_vluxseg2ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_tumu( @@ -1421,7 +1421,7 @@ void test_vluxseg2ei16_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vluxseg2ei16_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_tumu( @@ -1434,7 +1434,7 @@ void test_vluxseg2ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_tumu( @@ -1447,7 +1447,7 @@ void test_vluxseg2ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_tumu( @@ -1460,7 +1460,7 @@ void test_vluxseg2ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_tumu( @@ -1473,7 +1473,7 @@ void test_vluxseg2ei16_v_i8m1_tumu(vint8m1_t *v0, 
vint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_tumu( @@ -1486,7 +1486,7 @@ void test_vluxseg2ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_tumu( @@ -1499,7 +1499,7 @@ void test_vluxseg2ei16_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_tumu( @@ -1512,7 +1512,7 @@ void test_vluxseg2ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf2_tumu(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_tumu( @@ -1525,7 +1525,7 @@ void test_vluxseg2ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_tumu( @@ -1538,7 +1538,7 @@ void test_vluxseg2ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_tumu( @@ -1551,7 +1551,7 @@ void test_vluxseg2ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_tumu( @@ -1564,7 +1564,7 @@ void test_vluxseg2ei16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t 
mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_tumu( @@ -1577,7 +1577,7 @@ void test_vluxseg2ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_tumu( @@ -1590,7 +1590,7 @@ void test_vluxseg2ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_tumu( @@ -1603,7 +1603,7 @@ void test_vluxseg2ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m4_tumu(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_tumu( @@ -1616,7 +1616,7 @@ void test_vluxseg2ei16_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_tumu( @@ -1629,7 +1629,7 @@ void test_vluxseg2ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_tumu( @@ -1642,7 +1642,7 @@ void test_vluxseg2ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_tumu( @@ -1655,7 +1655,7 @@ void test_vluxseg2ei16_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t ma // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_tumu( @@ -1668,7 +1668,7 @@ void test_vluxseg2ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_tumu( @@ -1681,7 +1681,7 @@ void test_vluxseg2ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_tumu( @@ -1694,7 +1694,7 @@ void test_vluxseg2ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); + return __riscv_vluxseg2ei16_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_tumu( @@ -1707,7 +1707,7 @@ void test_vluxseg2ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_tumu( @@ -1720,7 +1720,7 @@ void test_vluxseg2ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_tumu( @@ -1733,7 +1733,7 @@ void test_vluxseg2ei16_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_tumu( @@ -1746,7 +1746,7 @@ void test_vluxseg2ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: 
ret void // void test_vluxseg2ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_tumu( @@ -1759,7 +1759,7 @@ void test_vluxseg2ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_tumu( @@ -1772,7 +1772,7 @@ void test_vluxseg2ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_tumu( @@ -1785,7 +1785,7 @@ void test_vluxseg2ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, 
base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_tumu( @@ -1798,7 +1798,7 @@ void test_vluxseg2ei16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_tumu( @@ -1811,7 +1811,7 @@ void test_vluxseg2ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_tumu( @@ -1824,7 +1824,7 @@ void test_vluxseg2ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_tumu( @@ -1837,7 +1837,7 @@ void test_vluxseg2ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, 
vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_tumu( @@ -1850,7 +1850,7 @@ void test_vluxseg2ei16_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_tumu( @@ -1863,7 +1863,7 @@ void test_vluxseg2ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_tumu( @@ -1876,7 +1876,7 @@ void test_vluxseg2ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m4_tumu(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_mu( @@ -1889,7 +1889,7 @@ void test_vluxseg2ei16_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_mu( @@ -1902,7 +1902,7 @@ void test_vluxseg2ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_mu( @@ -1915,7 +1915,7 @@ void test_vluxseg2ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_mu( @@ -1928,7 +1928,7 @@ void test_vluxseg2ei16_v_f16m1_mu(vfloat16m1_t *v0, 
vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_mu( @@ -1941,7 +1941,7 @@ void test_vluxseg2ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_mu( @@ -1954,7 +1954,7 @@ void test_vluxseg2ei16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_mu( @@ -1967,7 +1967,7 @@ void test_vluxseg2ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m1_mu(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_mu( @@ -1980,7 +1980,7 @@ void test_vluxseg2ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_mu( @@ -1993,7 +1993,7 @@ void test_vluxseg2ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_mu( @@ -2006,7 +2006,7 @@ void test_vluxseg2ei16_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_mu( @@ -2019,7 +2019,7 @@ void test_vluxseg2ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_mu( @@ -2032,7 +2032,7 @@ void test_vluxseg2ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_mu( @@ -2045,7 +2045,7 @@ void test_vluxseg2ei16_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_mu( @@ -2058,7 +2058,7 @@ void test_vluxseg2ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei16_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_mu( @@ -2071,7 +2071,7 @@ void test_vluxseg2ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_mu( @@ -2084,7 +2084,7 @@ void test_vluxseg2ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_mu( @@ -2097,7 +2097,7 @@ void test_vluxseg2ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_mu( @@ -2110,7 +2110,7 @@ void test_vluxseg2ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, 
vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_mu( @@ -2123,7 +2123,7 @@ void test_vluxseg2ei16_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_mu( @@ -2136,7 +2136,7 @@ void test_vluxseg2ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_mu( @@ -2149,7 +2149,7 @@ void test_vluxseg2ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_mu( @@ -2162,7 +2162,7 @@ void test_vluxseg2ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_mu( @@ -2175,7 +2175,7 @@ void test_vluxseg2ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_mu( @@ -2188,7 +2188,7 @@ void test_vluxseg2ei16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_mu( @@ -2201,7 +2201,7 @@ void test_vluxseg2ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, 
const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_mu( @@ -2214,7 +2214,7 @@ void test_vluxseg2ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_mu( @@ -2227,7 +2227,7 @@ void test_vluxseg2ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_mu( @@ -2240,7 +2240,7 @@ void test_vluxseg2ei16_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_mu( @@ -2253,7 +2253,7 @@ 
void test_vluxseg2ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_mu( @@ -2266,7 +2266,7 @@ void test_vluxseg2ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_mu( @@ -2279,7 +2279,7 @@ void test_vluxseg2ei16_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_mu( @@ -2292,7 +2292,7 @@ void test_vluxseg2ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return 
vluxseg2ei16_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_mu( @@ -2305,7 +2305,7 @@ void test_vluxseg2ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_mu( @@ -2318,7 +2318,7 @@ void test_vluxseg2ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_mu( @@ -2331,7 +2331,7 @@ void test_vluxseg2ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_mu( @@ -2344,7 +2344,7 @@ void test_vluxseg2ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t 
mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_mu( @@ -2357,7 +2357,7 @@ void test_vluxseg2ei16_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_mu( @@ -2370,7 +2370,7 @@ void test_vluxseg2ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_mu( @@ -2383,7 +2383,7 @@ void test_vluxseg2ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, 
base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_mu( @@ -2396,7 +2396,7 @@ void test_vluxseg2ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_mu( @@ -2409,7 +2409,7 @@ void test_vluxseg2ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_mu( @@ -2422,7 +2422,7 @@ void test_vluxseg2ei16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_mu( @@ -2435,7 +2435,7 @@ void test_vluxseg2ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret 
void // void test_vluxseg2ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_mu( @@ -2448,7 +2448,7 @@ void test_vluxseg2ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_mu( @@ -2461,7 +2461,7 @@ void test_vluxseg2ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_mu( @@ -2474,7 +2474,7 @@ void test_vluxseg2ei16_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei16_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_mu( @@ -2487,7 +2487,7 @@ void test_vluxseg2ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_mu( @@ -2500,6 +2500,6 @@ void test_vluxseg2ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei16_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei16_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei32.c index a3679cee4057..5ea59a775c5c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei32.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei32_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_tu( @@ -30,7 +30,7 @@ void test_vluxseg2ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_tu( @@ -43,7 +43,7 @@ void test_vluxseg2ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_tu( @@ -56,7 +56,7 @@ void test_vluxseg2ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_tu( @@ -69,7 +69,7 @@ void test_vluxseg2ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t 
maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_tu( @@ -82,7 +82,7 @@ void test_vluxseg2ei32_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_tu( @@ -95,7 +95,7 @@ void test_vluxseg2ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_tu( @@ -108,7 +108,7 @@ void test_vluxseg2ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_tu( @@ -121,7 +121,7 @@ void test_vluxseg2ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, 
vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_tu( @@ -134,7 +134,7 @@ void test_vluxseg2ei32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_tu( @@ -147,7 +147,7 @@ void test_vluxseg2ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_tu( @@ -160,7 +160,7 @@ void test_vluxseg2ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_tu( @@ -173,7 +173,7 @@ void test_vluxseg2ei32_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_tu( @@ -186,7 +186,7 @@ void test_vluxseg2ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_tu( @@ -199,7 +199,7 @@ void test_vluxseg2ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_tu( @@ -212,7 +212,7 @@ void test_vluxseg2ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, 
base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_tu( @@ -225,7 +225,7 @@ void test_vluxseg2ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedo // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_tu( @@ -238,7 +238,7 @@ void test_vluxseg2ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedo // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_tu( @@ -251,7 +251,7 @@ void test_vluxseg2ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_tu( @@ -264,7 +264,7 @@ void test_vluxseg2ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t 
maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_tu( @@ -277,7 +277,7 @@ void test_vluxseg2ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_tu( @@ -290,7 +290,7 @@ void test_vluxseg2ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tu( @@ -303,7 +303,7 @@ void test_vluxseg2ei32_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_tu( @@ -316,7 +316,7 @@ void test_vluxseg2ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_tu( @@ -329,7 +329,7 @@ void test_vluxseg2ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_tu( @@ -342,7 +342,7 @@ void test_vluxseg2ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_tu( @@ -355,7 +355,7 @@ void test_vluxseg2ei32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg2ei32_v_i64m2_tu( @@ -368,7 +368,7 @@ void test_vluxseg2ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_tu( @@ -381,7 +381,7 @@ void test_vluxseg2ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_tu( @@ -394,7 +394,7 @@ void test_vluxseg2ei32_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_tu( @@ -407,7 +407,7 @@ void test_vluxseg2ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, 
bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_tu( @@ -420,7 +420,7 @@ void test_vluxseg2ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_tu( @@ -433,7 +433,7 @@ void test_vluxseg2ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_tu( @@ -446,7 +446,7 @@ void test_vluxseg2ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_tu( @@ -459,7 +459,7 @@ void test_vluxseg2ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, 
const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_tu( @@ -472,7 +472,7 @@ void test_vluxseg2ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_tu( @@ -485,7 +485,7 @@ void test_vluxseg2ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_tu( @@ -498,7 +498,7 @@ void test_vluxseg2ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_tu( @@ -511,7 +511,7 @@ void test_vluxseg2ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_tu( @@ -524,7 +524,7 @@ void test_vluxseg2ei32_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_tu( @@ -537,7 +537,7 @@ void test_vluxseg2ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_tu( @@ -550,7 +550,7 @@ void test_vluxseg2ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_tu( @@ -563,7 +563,7 @@ void test_vluxseg2ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_tu( @@ -576,7 +576,7 @@ void test_vluxseg2ei32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_tu( @@ -589,7 +589,7 @@ void test_vluxseg2ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_tu( @@ -602,7 +602,7 @@ void test_vluxseg2ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m4_tu(v0, v1, 
maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_tum( @@ -615,7 +615,7 @@ void test_vluxseg2ei32_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_tum( @@ -628,7 +628,7 @@ void test_vluxseg2ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_tum( @@ -641,7 +641,7 @@ void test_vluxseg2ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_tum( @@ -654,7 +654,7 @@ void test_vluxseg2ei32_v_f16m1_tum(vfloat16m1_t *v0, 
vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_tum( @@ -667,7 +667,7 @@ void test_vluxseg2ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_tum( @@ -680,7 +680,7 @@ void test_vluxseg2ei32_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_tum( @@ -693,7 +693,7 @@ void test_vluxseg2ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m1_tum(v0, 
v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_tum( @@ -706,7 +706,7 @@ void test_vluxseg2ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_tum( @@ -719,7 +719,7 @@ void test_vluxseg2ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_tum( @@ -732,7 +732,7 @@ void test_vluxseg2ei32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_tum( @@ -745,7 +745,7 @@ void test_vluxseg2ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, 
vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_tum( @@ -758,7 +758,7 @@ void test_vluxseg2ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_tum( @@ -771,7 +771,7 @@ void test_vluxseg2ei32_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_tum( @@ -784,7 +784,7 @@ void test_vluxseg2ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_tum( @@ -797,7 +797,7 @@ void test_vluxseg2ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_tum( @@ -810,7 +810,7 @@ void test_vluxseg2ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_tum( @@ -823,7 +823,7 @@ void test_vluxseg2ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_tum( @@ -836,7 +836,7 @@ void test_vluxseg2ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_tum( @@ -849,7 +849,7 @@ void test_vluxseg2ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_tum( @@ -862,7 +862,7 @@ void test_vluxseg2ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_tum( @@ -875,7 +875,7 @@ void test_vluxseg2ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei32_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_tum( @@ -888,7 +888,7 @@ void test_vluxseg2ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tum( @@ -901,7 +901,7 @@ void test_vluxseg2ei32_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_tum( @@ -914,7 +914,7 @@ void test_vluxseg2ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_tum( @@ -927,7 +927,7 @@ void test_vluxseg2ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_tum( @@ -940,7 +940,7 @@ void test_vluxseg2ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_tum( @@ -953,7 +953,7 @@ void test_vluxseg2ei32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_tum( @@ -966,7 +966,7 @@ void test_vluxseg2ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m2_tum(v0, 
v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_tum( @@ -979,7 +979,7 @@ void test_vluxseg2ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_tum( @@ -992,7 +992,7 @@ void test_vluxseg2ei32_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_tum( @@ -1005,7 +1005,7 @@ void test_vluxseg2ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_tum( @@ -1018,7 +1018,7 @@ void test_vluxseg2ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf2_tum(vuint8mf2_t *v0, 
vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_tum( @@ -1031,7 +1031,7 @@ void test_vluxseg2ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_tum( @@ -1044,7 +1044,7 @@ void test_vluxseg2ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_tum( @@ -1057,7 +1057,7 @@ void test_vluxseg2ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_tum( @@ -1070,7 +1070,7 @@ void test_vluxseg2ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_tum( @@ -1083,7 +1083,7 @@ void test_vluxseg2ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_tum( @@ -1096,7 +1096,7 @@ void test_vluxseg2ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_tum( @@ -1109,7 +1109,7 @@ void test_vluxseg2ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t 
mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_tum( @@ -1122,7 +1122,7 @@ void test_vluxseg2ei32_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_tum( @@ -1135,7 +1135,7 @@ void test_vluxseg2ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_tum( @@ -1148,7 +1148,7 @@ void test_vluxseg2ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_tum( @@ -1161,7 +1161,7 @@ void test_vluxseg2ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_tum( @@ -1174,7 +1174,7 @@ void test_vluxseg2ei32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_tum( @@ -1187,7 +1187,7 @@ void test_vluxseg2ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_tum( @@ -1200,7 +1200,7 @@ void test_vluxseg2ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, 
vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_tumu( @@ -1213,7 +1213,7 @@ void test_vluxseg2ei32_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_tumu( @@ -1226,7 +1226,7 @@ void test_vluxseg2ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_tumu( @@ -1239,7 +1239,7 @@ void test_vluxseg2ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m1_tumu(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_tumu( @@ -1252,7 +1252,7 @@ void test_vluxseg2ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_tumu( @@ -1265,7 +1265,7 @@ void test_vluxseg2ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_tumu( @@ -1278,7 +1278,7 @@ void test_vluxseg2ei32_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_tumu( @@ -1291,7 +1291,7 @@ void test_vluxseg2ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_tumu( @@ -1304,7 +1304,7 @@ void test_vluxseg2ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_tumu( @@ -1317,7 +1317,7 @@ void test_vluxseg2ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_tumu( @@ -1330,7 +1330,7 @@ void test_vluxseg2ei32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + 
return __riscv_vluxseg2ei32_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_tumu( @@ -1343,7 +1343,7 @@ void test_vluxseg2ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_tumu( @@ -1356,7 +1356,7 @@ void test_vluxseg2ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_tumu( @@ -1369,7 +1369,7 @@ void test_vluxseg2ei32_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_tumu( @@ -1382,7 +1382,7 @@ void test_vluxseg2ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret 
void // void test_vluxseg2ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_tumu( @@ -1395,7 +1395,7 @@ void test_vluxseg2ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_tumu( @@ -1408,7 +1408,7 @@ void test_vluxseg2ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_tumu( @@ -1421,7 +1421,7 @@ void test_vluxseg2ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei32_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_tumu( @@ -1434,7 +1434,7 @@ void test_vluxseg2ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_tumu( @@ -1447,7 +1447,7 @@ void test_vluxseg2ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_tumu( @@ -1460,7 +1460,7 @@ void test_vluxseg2ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_tumu( @@ -1473,7 +1473,7 @@ void test_vluxseg2ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // 
void test_vluxseg2ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_tumu( @@ -1486,7 +1486,7 @@ void test_vluxseg2ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tumu( @@ -1499,7 +1499,7 @@ void test_vluxseg2ei32_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_tumu( @@ -1512,7 +1512,7 @@ void test_vluxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei32_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_tumu( @@ -1525,7 +1525,7 @@ void test_vluxseg2ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_tumu( @@ -1538,7 +1538,7 @@ void test_vluxseg2ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_tumu( @@ -1551,7 +1551,7 @@ void test_vluxseg2ei32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_tumu( @@ -1564,7 +1564,7 @@ void test_vluxseg2ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_tumu( @@ -1577,7 +1577,7 @@ void test_vluxseg2ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_tumu( @@ -1590,7 +1590,7 @@ void test_vluxseg2ei32_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_tumu( @@ -1603,7 +1603,7 @@ void test_vluxseg2ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei32_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_tumu( @@ -1616,7 +1616,7 @@ void test_vluxseg2ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_tumu( @@ -1629,7 +1629,7 @@ void test_vluxseg2ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_tumu( @@ -1642,7 +1642,7 @@ void test_vluxseg2ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_tumu( @@ -1655,7 +1655,7 @@ void test_vluxseg2ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_tumu( @@ -1668,7 +1668,7 @@ void test_vluxseg2ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_tumu( @@ -1681,7 +1681,7 @@ void test_vluxseg2ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_tumu( @@ -1694,7 +1694,7 @@ void test_vluxseg2ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); + return __riscv_vluxseg2ei32_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_tumu( @@ -1707,7 +1707,7 @@ void test_vluxseg2ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_tumu( @@ -1720,7 +1720,7 @@ void test_vluxseg2ei32_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_tumu( @@ -1733,7 +1733,7 @@ void test_vluxseg2ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_tumu( @@ -1746,7 +1746,7 @@ void test_vluxseg2ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_tumu( @@ -1759,7 +1759,7 @@ void test_vluxseg2ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_tumu( @@ -1772,7 +1772,7 @@ void test_vluxseg2ei32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_tumu( @@ -1785,7 +1785,7 @@ void test_vluxseg2ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m2_tumu(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_tumu( @@ -1798,7 +1798,7 @@ void test_vluxseg2ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_mu( @@ -1811,7 +1811,7 @@ void test_vluxseg2ei32_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_mu( @@ -1824,7 +1824,7 @@ void test_vluxseg2ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_mu( @@ -1837,7 +1837,7 @@ void test_vluxseg2ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t 
*v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_mu( @@ -1850,7 +1850,7 @@ void test_vluxseg2ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_mu( @@ -1863,7 +1863,7 @@ void test_vluxseg2ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_mu( @@ -1876,7 +1876,7 @@ void test_vluxseg2ei32_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32mf2_mu(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_mu( @@ -1889,7 +1889,7 @@ void test_vluxseg2ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_mu( @@ -1902,7 +1902,7 @@ void test_vluxseg2ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_mu( @@ -1915,7 +1915,7 @@ void test_vluxseg2ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_mu( @@ -1928,7 +1928,7 @@ void test_vluxseg2ei32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_mu( @@ -1941,7 +1941,7 @@ void test_vluxseg2ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_mu( @@ -1954,7 +1954,7 @@ void test_vluxseg2ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_mu( @@ -1967,7 +1967,7 @@ void test_vluxseg2ei32_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + 
return __riscv_vluxseg2ei32_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_mu( @@ -1980,7 +1980,7 @@ void test_vluxseg2ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_mu( @@ -1993,7 +1993,7 @@ void test_vluxseg2ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_mu( @@ -2006,7 +2006,7 @@ void test_vluxseg2ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_mu( @@ -2019,7 +2019,7 @@ void test_vluxseg2ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i8m2_mu(vint8m2_t *v0, 
vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_mu( @@ -2032,7 +2032,7 @@ void test_vluxseg2ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_mu( @@ -2045,7 +2045,7 @@ void test_vluxseg2ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_mu( @@ -2058,7 +2058,7 @@ void test_vluxseg2ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_mu( @@ -2071,7 +2071,7 @@ void test_vluxseg2ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_mu( @@ -2084,7 +2084,7 @@ void test_vluxseg2ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_mu( @@ -2097,7 +2097,7 @@ void test_vluxseg2ei32_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_mu( @@ -2110,7 +2110,7 @@ void test_vluxseg2ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, 
vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_mu( @@ -2123,7 +2123,7 @@ void test_vluxseg2ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_mu( @@ -2136,7 +2136,7 @@ void test_vluxseg2ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_mu( @@ -2149,7 +2149,7 @@ void test_vluxseg2ei32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_mu( @@ 
-2162,7 +2162,7 @@ void test_vluxseg2ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_mu( @@ -2175,7 +2175,7 @@ void test_vluxseg2ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_mu( @@ -2188,7 +2188,7 @@ void test_vluxseg2ei32_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_mu( @@ -2201,7 +2201,7 @@ void test_vluxseg2ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { 
- return vluxseg2ei32_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_mu( @@ -2214,7 +2214,7 @@ void test_vluxseg2ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_mu( @@ -2227,7 +2227,7 @@ void test_vluxseg2ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_mu( @@ -2240,7 +2240,7 @@ void test_vluxseg2ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_mu( @@ -2253,7 +2253,7 @@ void test_vluxseg2ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, 
vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_mu( @@ -2266,7 +2266,7 @@ void test_vluxseg2ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_mu( @@ -2279,7 +2279,7 @@ void test_vluxseg2ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_mu( @@ -2292,7 +2292,7 @@ void test_vluxseg2ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m2_mu(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_mu( @@ -2305,7 +2305,7 @@ void test_vluxseg2ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_mu( @@ -2318,7 +2318,7 @@ void test_vluxseg2ei32_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_mu( @@ -2331,7 +2331,7 @@ void test_vluxseg2ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_mu( @@ -2344,7 +2344,7 @@ void test_vluxseg2ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_mu( @@ -2357,7 +2357,7 @@ void test_vluxseg2ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_mu( @@ -2370,7 +2370,7 @@ void test_vluxseg2ei32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_mu( @@ -2383,7 +2383,7 @@ void test_vluxseg2ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + 
return __riscv_vluxseg2ei32_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_mu( @@ -2396,6 +2396,6 @@ void test_vluxseg2ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei32_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei32_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei64.c index 1b067687bcd9..5ca087f80ce8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei64.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_tu( @@ -30,7 +30,7 @@ void test_vluxseg2ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16mf2_tu(v0, 
v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_tu( @@ -43,7 +43,7 @@ void test_vluxseg2ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_tu( @@ -56,7 +56,7 @@ void test_vluxseg2ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_tu( @@ -69,7 +69,7 @@ void test_vluxseg2ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_tu( @@ -82,7 +82,7 @@ void test_vluxseg2ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t 
bindex, size_t vl) { - return vluxseg2ei64_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_tu( @@ -95,7 +95,7 @@ void test_vluxseg2ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_tu( @@ -108,7 +108,7 @@ void test_vluxseg2ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_tu( @@ -121,7 +121,7 @@ void test_vluxseg2ei64_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_tu( @@ -134,7 +134,7 @@ void test_vluxseg2ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_tu( @@ -147,7 +147,7 @@ void test_vluxseg2ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_tu( @@ -160,7 +160,7 @@ void test_vluxseg2ei64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_tu( @@ -173,7 +173,7 @@ void test_vluxseg2ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_tu( @@ -186,7 
+186,7 @@ void test_vluxseg2ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_tu( @@ -199,7 +199,7 @@ void test_vluxseg2ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_tu( @@ -212,7 +212,7 @@ void test_vluxseg2ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedo // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_tu( @@ -225,7 +225,7 @@ void test_vluxseg2ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei64_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_tu( @@ -238,7 +238,7 @@ void test_vluxseg2ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_tu( @@ -251,7 +251,7 @@ void test_vluxseg2ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_tu( @@ -264,7 +264,7 @@ void test_vluxseg2ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_tu( @@ -277,7 +277,7 @@ void test_vluxseg2ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, 
vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_tu( @@ -290,7 +290,7 @@ void test_vluxseg2ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_tu( @@ -303,7 +303,7 @@ void test_vluxseg2ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_tu( @@ -316,7 +316,7 @@ void test_vluxseg2ei64_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_tu( @@ -329,7 +329,7 @@ void test_vluxseg2ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mas // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_tu( @@ -342,7 +342,7 @@ void test_vluxseg2ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_tu( @@ -355,7 +355,7 @@ void test_vluxseg2ei64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_tu( @@ -368,7 +368,7 @@ void test_vluxseg2ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_tu( @@ -381,7 
+381,7 @@ void test_vluxseg2ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_tu( @@ -394,7 +394,7 @@ void test_vluxseg2ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_tu( @@ -407,7 +407,7 @@ void test_vluxseg2ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_tu( @@ -420,7 +420,7 @@ void test_vluxseg2ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei64_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_tu( @@ -433,7 +433,7 @@ void test_vluxseg2ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_tu( @@ -446,7 +446,7 @@ void test_vluxseg2ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_tu( @@ -459,7 +459,7 @@ void test_vluxseg2ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_tu( @@ -472,7 +472,7 @@ void test_vluxseg2ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, 
const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_tu( @@ -485,7 +485,7 @@ void test_vluxseg2ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_tu( @@ -498,7 +498,7 @@ void test_vluxseg2ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_tu( @@ -511,7 +511,7 @@ void test_vluxseg2ei64_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_tu( @@ -524,7 +524,7 @@ void test_vluxseg2ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_tu( @@ -537,7 +537,7 @@ void test_vluxseg2ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_tum( @@ -550,7 +550,7 @@ void test_vluxseg2ei64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_tum( @@ -563,7 +563,7 @@ void test_vluxseg2ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei64_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_tum( @@ -576,7 +576,7 @@ void test_vluxseg2ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_tum( @@ -589,7 +589,7 @@ void test_vluxseg2ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_tum( @@ -602,7 +602,7 @@ void test_vluxseg2ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_tum( @@ -615,7 +615,7 @@ void test_vluxseg2ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_tum( @@ -628,7 +628,7 @@ void test_vluxseg2ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_tum( @@ -641,7 +641,7 @@ void test_vluxseg2ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_tum( @@ -654,7 +654,7 @@ void test_vluxseg2ei64_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei64_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_tum( @@ -667,7 +667,7 @@ void test_vluxseg2ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_tum( @@ -680,7 +680,7 @@ void test_vluxseg2ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_tum( @@ -693,7 +693,7 @@ void test_vluxseg2ei64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_tum( @@ -706,7 +706,7 @@ void test_vluxseg2ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_tum( @@ -719,7 +719,7 @@ void test_vluxseg2ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_tum( @@ -732,7 +732,7 @@ void test_vluxseg2ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_tum( @@ -745,7 +745,7 @@ void test_vluxseg2ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16mf4_tum(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_tum( @@ -758,7 +758,7 @@ void test_vluxseg2ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_tum( @@ -771,7 +771,7 @@ void test_vluxseg2ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_tum( @@ -784,7 +784,7 @@ void test_vluxseg2ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_tum( @@ -797,7 +797,7 @@ void test_vluxseg2ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, 
vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_tum( @@ -810,7 +810,7 @@ void test_vluxseg2ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_tum( @@ -823,7 +823,7 @@ void test_vluxseg2ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_tum( @@ -836,7 +836,7 @@ void test_vluxseg2ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_tum( @@ -849,7 +849,7 @@ void test_vluxseg2ei64_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_tum( @@ -862,7 +862,7 @@ void test_vluxseg2ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_tum( @@ -875,7 +875,7 @@ void test_vluxseg2ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_tum( @@ -888,7 +888,7 @@ void test_vluxseg2ei64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, 
const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_tum( @@ -901,7 +901,7 @@ void test_vluxseg2ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_tum( @@ -914,7 +914,7 @@ void test_vluxseg2ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_tum( @@ -927,7 +927,7 @@ void test_vluxseg2ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_tum( @@ -940,7 
+940,7 @@ void test_vluxseg2ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_tum( @@ -953,7 +953,7 @@ void test_vluxseg2ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_tum( @@ -966,7 +966,7 @@ void test_vluxseg2ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_tum( @@ -979,7 +979,7 @@ void test_vluxseg2ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, 
vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_tum( @@ -992,7 +992,7 @@ void test_vluxseg2ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_tum( @@ -1005,7 +1005,7 @@ void test_vluxseg2ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_tum( @@ -1018,7 +1018,7 @@ void test_vluxseg2ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_tum( @@ -1031,7 
+1031,7 @@ void test_vluxseg2ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_tum( @@ -1044,7 +1044,7 @@ void test_vluxseg2ei64_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_tum( @@ -1057,7 +1057,7 @@ void test_vluxseg2ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_tum( @@ -1070,7 +1070,7 @@ void test_vluxseg2ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t 
bindex, size_t vl) { - return vluxseg2ei64_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_tumu( @@ -1083,7 +1083,7 @@ void test_vluxseg2ei64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_tumu( @@ -1096,7 +1096,7 @@ void test_vluxseg2ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_tumu( @@ -1109,7 +1109,7 @@ void test_vluxseg2ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg2ei64_v_f16m2_tumu( @@ -1122,7 +1122,7 @@ void test_vluxseg2ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_tumu( @@ -1135,7 +1135,7 @@ void test_vluxseg2ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_tumu( @@ -1148,7 +1148,7 @@ void test_vluxseg2ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_tumu( @@ -1161,7 +1161,7 @@ void test_vluxseg2ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t 
maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_tumu( @@ -1174,7 +1174,7 @@ void test_vluxseg2ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_tumu( @@ -1187,7 +1187,7 @@ void test_vluxseg2ei64_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_tumu( @@ -1200,7 +1200,7 @@ void test_vluxseg2ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_tumu( @@ -1213,7 +1213,7 @@ void test_vluxseg2ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_tumu( @@ -1226,7 +1226,7 @@ void test_vluxseg2ei64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_tumu( @@ -1239,7 +1239,7 @@ void test_vluxseg2ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_tumu( @@ -1252,7 +1252,7 @@ void test_vluxseg2ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t 
maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_tumu( @@ -1265,7 +1265,7 @@ void test_vluxseg2ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_tumu( @@ -1278,7 +1278,7 @@ void test_vluxseg2ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_tumu( @@ -1291,7 +1291,7 @@ void test_vluxseg2ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_tumu( @@ -1304,7 +1304,7 @@ void test_vluxseg2ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_tumu( @@ -1317,7 +1317,7 @@ void test_vluxseg2ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_tumu( @@ -1330,7 +1330,7 @@ void test_vluxseg2ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_tumu( @@ -1343,7 +1343,7 @@ void test_vluxseg2ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t 
maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_tumu( @@ -1356,7 +1356,7 @@ void test_vluxseg2ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_tumu( @@ -1369,7 +1369,7 @@ void test_vluxseg2ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_tumu( @@ -1382,7 +1382,7 @@ void test_vluxseg2ei64_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_tumu( @@ -1395,7 +1395,7 @@ void test_vluxseg2ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_tumu( @@ -1408,7 +1408,7 @@ void test_vluxseg2ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_tumu( @@ -1421,7 +1421,7 @@ void test_vluxseg2ei64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_tumu( @@ -1434,7 +1434,7 @@ void test_vluxseg2ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, 
vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_tumu( @@ -1447,7 +1447,7 @@ void test_vluxseg2ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_tumu( @@ -1460,7 +1460,7 @@ void test_vluxseg2ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_tumu( @@ -1473,7 +1473,7 @@ void test_vluxseg2ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_tumu( @@ -1486,7 +1486,7 @@ void test_vluxseg2ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_tumu( @@ -1499,7 +1499,7 @@ void test_vluxseg2ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_tumu( @@ -1512,7 +1512,7 @@ void test_vluxseg2ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_tumu( @@ -1525,7 +1525,7 @@ void test_vluxseg2ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, 
vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_tumu( @@ -1538,7 +1538,7 @@ void test_vluxseg2ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_tumu( @@ -1551,7 +1551,7 @@ void test_vluxseg2ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_tumu( @@ -1564,7 +1564,7 @@ void test_vluxseg2ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_tumu( @@ -1577,7 +1577,7 @@ void test_vluxseg2ei64_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_tumu( @@ -1590,7 +1590,7 @@ void test_vluxseg2ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_tumu( @@ -1603,7 +1603,7 @@ void test_vluxseg2ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_mu( @@ -1616,7 +1616,7 @@ void test_vluxseg2ei64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t 
mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_mu( @@ -1629,7 +1629,7 @@ void test_vluxseg2ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_mu( @@ -1642,7 +1642,7 @@ void test_vluxseg2ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_mu( @@ -1655,7 +1655,7 @@ void test_vluxseg2ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_mu( @@ -1668,7 +1668,7 @@ void test_vluxseg2ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_mu( @@ -1681,7 +1681,7 @@ void test_vluxseg2ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_mu( @@ -1694,7 +1694,7 @@ void test_vluxseg2ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_mu( @@ -1707,7 +1707,7 @@ void test_vluxseg2ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, 
vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_mu( @@ -1720,7 +1720,7 @@ void test_vluxseg2ei64_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_mu( @@ -1733,7 +1733,7 @@ void test_vluxseg2ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_mu( @@ -1746,7 +1746,7 @@ void test_vluxseg2ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_mu( @@ -1759,7 +1759,7 @@ void test_vluxseg2ei64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_mu( @@ -1772,7 +1772,7 @@ void test_vluxseg2ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_mu( @@ -1785,7 +1785,7 @@ void test_vluxseg2ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_mu( @@ -1798,7 +1798,7 @@ void test_vluxseg2ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, 
vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_mu( @@ -1811,7 +1811,7 @@ void test_vluxseg2ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_mu( @@ -1824,7 +1824,7 @@ void test_vluxseg2ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_mu( @@ -1837,7 +1837,7 @@ void test_vluxseg2ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_mu( @@ -1850,7 +1850,7 @@ void 
test_vluxseg2ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_mu( @@ -1863,7 +1863,7 @@ void test_vluxseg2ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_mu( @@ -1876,7 +1876,7 @@ void test_vluxseg2ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_mu( @@ -1889,7 +1889,7 @@ void test_vluxseg2ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return 
vluxseg2ei64_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_mu( @@ -1902,7 +1902,7 @@ void test_vluxseg2ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_mu( @@ -1915,7 +1915,7 @@ void test_vluxseg2ei64_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_mu( @@ -1928,7 +1928,7 @@ void test_vluxseg2ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_mu( @@ -1941,7 +1941,7 @@ void test_vluxseg2ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, 
vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_mu( @@ -1954,7 +1954,7 @@ void test_vluxseg2ei64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_mu( @@ -1967,7 +1967,7 @@ void test_vluxseg2ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_mu( @@ -1980,7 +1980,7 @@ void test_vluxseg2ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); + return __riscv_vluxseg2ei64_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_mu( @@ -1993,7 +1993,7 @@ void test_vluxseg2ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_mu( @@ -2006,7 +2006,7 @@ void test_vluxseg2ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_mu( @@ -2019,7 +2019,7 @@ void test_vluxseg2ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_mu( @@ -2032,7 +2032,7 @@ void test_vluxseg2ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // 
void test_vluxseg2ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_mu( @@ -2045,7 +2045,7 @@ void test_vluxseg2ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_mu( @@ -2058,7 +2058,7 @@ void test_vluxseg2ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_mu( @@ -2071,7 +2071,7 @@ void test_vluxseg2ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei64_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_mu( @@ -2084,7 +2084,7 @@ void test_vluxseg2ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_mu( @@ -2097,7 +2097,7 @@ void test_vluxseg2ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_mu( @@ -2110,7 +2110,7 @@ void test_vluxseg2ei64_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_mu( @@ -2123,7 +2123,7 @@ void test_vluxseg2ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_mu( @@ -2136,6 +2136,6 @@ void test_vluxseg2ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei64_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei64_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei8.c index 9fb1a2f180f7..eb3f67c5b786 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg2ei8.c @@ -17,7 +17,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_tu( @@ -30,7 +30,7 @@ void test_vluxseg2ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_tu( @@ -43,7 +43,7 @@ void test_vluxseg2ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_tu( @@ -56,7 +56,7 @@ void test_vluxseg2ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_tu( @@ -69,7 +69,7 @@ void test_vluxseg2ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_tu( 
@@ -82,7 +82,7 @@ void test_vluxseg2ei8_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_tu( @@ -95,7 +95,7 @@ void test_vluxseg2ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_tu( @@ -108,7 +108,7 @@ void test_vluxseg2ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_tu( @@ -121,7 +121,7 @@ void test_vluxseg2ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei8_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_tu( @@ -134,7 +134,7 @@ void test_vluxseg2ei8_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_tu( @@ -147,7 +147,7 @@ void test_vluxseg2ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_tu( @@ -160,7 +160,7 @@ void test_vluxseg2ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_tu( @@ -173,7 +173,7 @@ void test_vluxseg2ei8_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, 
vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_tu( @@ -186,7 +186,7 @@ void test_vluxseg2ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_tu( @@ -199,7 +199,7 @@ void test_vluxseg2ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_tu( @@ -212,7 +212,7 @@ void test_vluxseg2ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_tu( @@ -225,7 +225,7 @@ void test_vluxseg2ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedof // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m2_tu(vint8m2_t 
*v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_tu( @@ -238,7 +238,7 @@ void test_vluxseg2ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedof // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_tu( @@ -251,7 +251,7 @@ void test_vluxseg2ei8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedof // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_tu( @@ -264,7 +264,7 @@ void test_vluxseg2ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_tu( @@ -277,7 +277,7 @@ void test_vluxseg2ei8_v_i16mf2_tu(vint16mf2_t *v0, 
vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_tu( @@ -290,7 +290,7 @@ void test_vluxseg2ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_tu( @@ -303,7 +303,7 @@ void test_vluxseg2ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_tu( @@ -316,7 +316,7 @@ void test_vluxseg2ei8_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_tu( @@ -329,7 +329,7 @@ void test_vluxseg2ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_tu( @@ -342,7 +342,7 @@ void test_vluxseg2ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_tu( @@ -355,7 +355,7 @@ void test_vluxseg2ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_tu( @@ -368,7 +368,7 @@ void test_vluxseg2ei8_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, 
vl); + return __riscv_vluxseg2ei8_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_tu( @@ -381,7 +381,7 @@ void test_vluxseg2ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_tu( @@ -394,7 +394,7 @@ void test_vluxseg2ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_tu( @@ -407,7 +407,7 @@ void test_vluxseg2ei8_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_tu( @@ -420,7 +420,7 @@ void test_vluxseg2ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, 
vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_tu( @@ -433,7 +433,7 @@ void test_vluxseg2ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_tu( @@ -446,7 +446,7 @@ void test_vluxseg2ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_tu( @@ -459,7 +459,7 @@ void test_vluxseg2ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maske // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_tu( @@ -472,7 +472,7 @@ void test_vluxseg2ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maske // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_tu( @@ -485,7 +485,7 @@ void test_vluxseg2ei8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maske // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_tu( @@ -498,7 +498,7 @@ void test_vluxseg2ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_tu( @@ -511,7 +511,7 @@ void test_vluxseg2ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_tu( @@ -524,7 
+524,7 @@ void test_vluxseg2ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_tu( @@ -537,7 +537,7 @@ void test_vluxseg2ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_tu( @@ -550,7 +550,7 @@ void test_vluxseg2ei8_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_tu( @@ -563,7 +563,7 @@ void test_vluxseg2ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei8_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_tu( @@ -576,7 +576,7 @@ void test_vluxseg2ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_tu( @@ -589,7 +589,7 @@ void test_vluxseg2ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_tu( @@ -602,7 +602,7 @@ void test_vluxseg2ei8_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_tu( @@ -615,7 +615,7 @@ void test_vluxseg2ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, 
vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_tu( @@ -628,7 +628,7 @@ void test_vluxseg2ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_tum( @@ -641,7 +641,7 @@ void test_vluxseg2ei8_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_tum( @@ -654,7 +654,7 @@ void test_vluxseg2ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_tum( @@ -667,7 +667,7 @@ void 
test_vluxseg2ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_tum( @@ -680,7 +680,7 @@ void test_vluxseg2ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_tum( @@ -693,7 +693,7 @@ void test_vluxseg2ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_tum( @@ -706,7 +706,7 @@ void test_vluxseg2ei8_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - 
return vluxseg2ei8_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_tum( @@ -719,7 +719,7 @@ void test_vluxseg2ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_tum( @@ -732,7 +732,7 @@ void test_vluxseg2ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_tum( @@ -745,7 +745,7 @@ void test_vluxseg2ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_tum( @@ -758,7 +758,7 @@ void test_vluxseg2ei8_v_f32m4_tum(vfloat32m4_t *v0, 
vfloat32m4_t *v1, vbool8_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_tum( @@ -771,7 +771,7 @@ void test_vluxseg2ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_tum( @@ -784,7 +784,7 @@ void test_vluxseg2ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_tum( @@ -797,7 +797,7 @@ void test_vluxseg2ei8_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf8_tum(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_tum( @@ -810,7 +810,7 @@ void test_vluxseg2ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_tum( @@ -823,7 +823,7 @@ void test_vluxseg2ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_tum( @@ -836,7 +836,7 @@ void test_vluxseg2ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_tum( @@ -849,7 +849,7 @@ void test_vluxseg2ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_tum( @@ -862,7 +862,7 @@ void test_vluxseg2ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_tum( @@ -875,7 +875,7 @@ void test_vluxseg2ei8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_tum( @@ -888,7 +888,7 @@ void test_vluxseg2ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16mf2_tum(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_tum( @@ -901,7 +901,7 @@ void test_vluxseg2ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_tum( @@ -914,7 +914,7 @@ void test_vluxseg2ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_tum( @@ -927,7 +927,7 @@ void test_vluxseg2ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_tum( @@ -940,7 +940,7 @@ void test_vluxseg2ei8_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, 
vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_tum( @@ -953,7 +953,7 @@ void test_vluxseg2ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_tum( @@ -966,7 +966,7 @@ void test_vluxseg2ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_tum( @@ -979,7 +979,7 @@ void test_vluxseg2ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_tum( @@ 
-992,7 +992,7 @@ void test_vluxseg2ei8_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_tum( @@ -1005,7 +1005,7 @@ void test_vluxseg2ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_tum( @@ -1018,7 +1018,7 @@ void test_vluxseg2ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_tum( @@ -1031,7 +1031,7 @@ void test_vluxseg2ei8_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - 
return vluxseg2ei8_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_tum( @@ -1044,7 +1044,7 @@ void test_vluxseg2ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_tum( @@ -1057,7 +1057,7 @@ void test_vluxseg2ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_tum( @@ -1070,7 +1070,7 @@ void test_vluxseg2ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_tum( @@ -1083,7 +1083,7 @@ void test_vluxseg2ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t 
*v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_tum( @@ -1096,7 +1096,7 @@ void test_vluxseg2ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_tum( @@ -1109,7 +1109,7 @@ void test_vluxseg2ei8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_tum( @@ -1122,7 +1122,7 @@ void test_vluxseg2ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); + return __riscv_vluxseg2ei8_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_tum( @@ -1135,7 +1135,7 @@ void test_vluxseg2ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_tum( @@ -1148,7 +1148,7 @@ void test_vluxseg2ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_tum( @@ -1161,7 +1161,7 @@ void test_vluxseg2ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_tum( @@ -1174,7 +1174,7 @@ void test_vluxseg2ei8_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mas // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_tum( @@ -1187,7 +1187,7 @@ void test_vluxseg2ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_tum( @@ -1200,7 +1200,7 @@ void test_vluxseg2ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_tum( @@ -1213,7 +1213,7 @@ void test_vluxseg2ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei8_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_tum( @@ -1226,7 +1226,7 @@ void test_vluxseg2ei8_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_tum( @@ -1239,7 +1239,7 @@ void test_vluxseg2ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_tum( @@ -1252,7 +1252,7 @@ void test_vluxseg2ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_tumu( @@ -1265,7 +1265,7 @@ void test_vluxseg2ei8_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t ma // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_tumu( @@ -1278,7 +1278,7 @@ void test_vluxseg2ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_tumu( @@ -1291,7 +1291,7 @@ void test_vluxseg2ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool3 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_tumu( @@ -1304,7 +1304,7 @@ void test_vluxseg2ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); + return __riscv_vluxseg2ei8_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_tumu( @@ -1317,7 +1317,7 @@ void test_vluxseg2ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_tumu( @@ -1330,7 +1330,7 @@ void test_vluxseg2ei8_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_tumu( @@ -1343,7 +1343,7 @@ void test_vluxseg2ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool6 // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_tumu( @@ -1356,7 +1356,7 @@ void test_vluxseg2ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_tumu( @@ -1369,7 +1369,7 @@ void test_vluxseg2ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_tumu( @@ -1382,7 +1382,7 @@ void test_vluxseg2ei8_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_tumu( @@ -1395,7 +1395,7 @@ void test_vluxseg2ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, 
base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_tumu( @@ -1408,7 +1408,7 @@ void test_vluxseg2ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_tumu( @@ -1421,7 +1421,7 @@ void test_vluxseg2ei8_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_tumu( @@ -1434,7 +1434,7 @@ void test_vluxseg2ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_tumu( @@ -1447,7 +1447,7 @@ void test_vluxseg2ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void 
// void test_vluxseg2ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_tumu( @@ -1460,7 +1460,7 @@ void test_vluxseg2ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_tumu( @@ -1473,7 +1473,7 @@ void test_vluxseg2ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_tumu( @@ -1486,7 +1486,7 @@ void test_vluxseg2ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m4_tumu(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_tumu( @@ -1499,7 +1499,7 @@ void test_vluxseg2ei8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_tumu( @@ -1512,7 +1512,7 @@ void test_vluxseg2ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_tumu( @@ -1525,7 +1525,7 @@ void test_vluxseg2ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_tumu( @@ -1538,7 +1538,7 @@ void test_vluxseg2ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t 
mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_tumu( @@ -1551,7 +1551,7 @@ void test_vluxseg2ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_tumu( @@ -1564,7 +1564,7 @@ void test_vluxseg2ei8_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_tumu( @@ -1577,7 +1577,7 @@ void test_vluxseg2ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_tumu( @@ -1590,7 +1590,7 @@ void test_vluxseg2ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_tumu( @@ -1603,7 +1603,7 @@ void test_vluxseg2ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_tumu( @@ -1616,7 +1616,7 @@ void test_vluxseg2ei8_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_tumu( @@ -1629,7 +1629,7 @@ void test_vluxseg2ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t 
maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_tumu( @@ -1642,7 +1642,7 @@ void test_vluxseg2ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_tumu( @@ -1655,7 +1655,7 @@ void test_vluxseg2ei8_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_tumu( @@ -1668,7 +1668,7 @@ void test_vluxseg2ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg2ei8_v_u8mf2_tumu( @@ -1681,7 +1681,7 @@ void test_vluxseg2ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_tumu( @@ -1694,7 +1694,7 @@ void test_vluxseg2ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_tumu( @@ -1707,7 +1707,7 @@ void test_vluxseg2ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_tumu( @@ -1720,7 +1720,7 @@ void test_vluxseg2ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, 
vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_tumu( @@ -1733,7 +1733,7 @@ void test_vluxseg2ei8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_tumu( @@ -1746,7 +1746,7 @@ void test_vluxseg2ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_tumu( @@ -1759,7 +1759,7 @@ void test_vluxseg2ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_tumu( @@ 
-1772,7 +1772,7 @@ void test_vluxseg2ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_tumu( @@ -1785,7 +1785,7 @@ void test_vluxseg2ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_tumu( @@ -1798,7 +1798,7 @@ void test_vluxseg2ei8_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_tumu( @@ -1811,7 +1811,7 @@ void test_vluxseg2ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, 
vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_tumu( @@ -1824,7 +1824,7 @@ void test_vluxseg2ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_tumu( @@ -1837,7 +1837,7 @@ void test_vluxseg2ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_tumu( @@ -1850,7 +1850,7 @@ void test_vluxseg2ei8_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_tumu( @@ -1863,7 +1863,7 
@@ void test_vluxseg2ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_tumu( @@ -1876,7 +1876,7 @@ void test_vluxseg2ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_mu( @@ -1889,7 +1889,7 @@ void test_vluxseg2ei8_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_mu( @@ -1902,7 +1902,7 @@ void test_vluxseg2ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t 
bindex, size_t vl) { - return vluxseg2ei8_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_mu( @@ -1915,7 +1915,7 @@ void test_vluxseg2ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_mu( @@ -1928,7 +1928,7 @@ void test_vluxseg2ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_mu( @@ -1941,7 +1941,7 @@ void test_vluxseg2ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_mu( @@ -1954,7 +1954,7 @@ void 
test_vluxseg2ei8_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_mu( @@ -1967,7 +1967,7 @@ void test_vluxseg2ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_mu( @@ -1980,7 +1980,7 @@ void test_vluxseg2ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_mu( @@ -1993,7 +1993,7 @@ void test_vluxseg2ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return 
vluxseg2ei8_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_mu( @@ -2006,7 +2006,7 @@ void test_vluxseg2ei8_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_mu( @@ -2019,7 +2019,7 @@ void test_vluxseg2ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_mu( @@ -2032,7 +2032,7 @@ void test_vluxseg2ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_mu( @@ -2045,7 +2045,7 @@ void test_vluxseg2ei8_v_f64m4_mu(vfloat64m4_t *v0, 
vfloat64m4_t *v1, vbool16_t m // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_mu( @@ -2058,7 +2058,7 @@ void test_vluxseg2ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_mu( @@ -2071,7 +2071,7 @@ void test_vluxseg2ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_mu( @@ -2084,7 +2084,7 @@ void test_vluxseg2ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return 
__riscv_vluxseg2ei8_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_mu( @@ -2097,7 +2097,7 @@ void test_vluxseg2ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_mu( @@ -2110,7 +2110,7 @@ void test_vluxseg2ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_mu( @@ -2123,7 +2123,7 @@ void test_vluxseg2ei8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_mu( @@ -2136,7 +2136,7 @@ void test_vluxseg2ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, 
vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_mu( @@ -2149,7 +2149,7 @@ void test_vluxseg2ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_mu( @@ -2162,7 +2162,7 @@ void test_vluxseg2ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_mu( @@ -2175,7 +2175,7 @@ void test_vluxseg2ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg2ei8_v_i32mf2_mu( @@ -2188,7 +2188,7 @@ void test_vluxseg2ei8_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_mu( @@ -2201,7 +2201,7 @@ void test_vluxseg2ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_mu( @@ -2214,7 +2214,7 @@ void test_vluxseg2ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_mu( @@ -2227,7 +2227,7 @@ void test_vluxseg2ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t 
bindex, size_t vl) { - return vluxseg2ei8_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_mu( @@ -2240,7 +2240,7 @@ void test_vluxseg2ei8_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_mu( @@ -2253,7 +2253,7 @@ void test_vluxseg2ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_mu( @@ -2266,7 +2266,7 @@ void test_vluxseg2ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_mu( @@ -2279,7 +2279,7 @@ void test_vluxseg2ei8_v_i64m4_mu(vint64m4_t *v0, 
vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_mu( @@ -2292,7 +2292,7 @@ void test_vluxseg2ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_mu( @@ -2305,7 +2305,7 @@ void test_vluxseg2ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_mu( @@ -2318,7 +2318,7 @@ void test_vluxseg2ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); + return __riscv_vluxseg2ei8_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_mu( @@ -2331,7 +2331,7 @@ void test_vluxseg2ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_mu( @@ -2344,7 +2344,7 @@ void test_vluxseg2ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_mu( @@ -2357,7 +2357,7 @@ void test_vluxseg2ei8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, v // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_mu( @@ -2370,7 +2370,7 @@ void test_vluxseg2ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg2ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_mu( @@ -2383,7 +2383,7 @@ void test_vluxseg2ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_mu( @@ -2396,7 +2396,7 @@ void test_vluxseg2ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_mu( @@ -2409,7 +2409,7 @@ void test_vluxseg2ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u16m4_mu(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_mu( @@ -2422,7 +2422,7 @@ void test_vluxseg2ei8_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_mu( @@ -2435,7 +2435,7 @@ void test_vluxseg2ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_mu( @@ -2448,7 +2448,7 @@ void test_vluxseg2ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_mu( @@ -2461,7 +2461,7 @@ void test_vluxseg2ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t 
mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_mu( @@ -2474,7 +2474,7 @@ void test_vluxseg2ei8_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_mu( @@ -2487,7 +2487,7 @@ void test_vluxseg2ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_mu( @@ -2500,6 +2500,6 @@ void test_vluxseg2ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas // CHECK-RV64-NEXT: ret void // void test_vluxseg2ei8_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); + return __riscv_vluxseg2ei8_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei16.c index 55fd0c4588ae..c9698725fbb3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei16.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_tu( @@ -34,7 +34,7 @@ void test_vluxseg3ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_tu( @@ -49,7 +49,7 @@ void test_vluxseg3ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_tu( @@ -64,7 +64,7 @@ void test_vluxseg3ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_tu( @@ -79,7 +79,7 @@ void test_vluxseg3ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_tu( @@ -94,7 +94,7 @@ void test_vluxseg3ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_tu( @@ -109,7 +109,7 @@ void test_vluxseg3ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_tu( @@ -124,7 +124,7 @@ void test_vluxseg3ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_tu( @@ -139,7 +139,7 @@ void test_vluxseg3ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_tu( @@ -154,7 +154,7 @@ void test_vluxseg3ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // 
CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_tu( @@ -169,7 +169,7 @@ void test_vluxseg3ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_tu( @@ -184,7 +184,7 @@ void test_vluxseg3ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_tu( @@ -199,7 +199,7 @@ void test_vluxseg3ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, 
vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_tu( @@ -214,7 +214,7 @@ void test_vluxseg3ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_tu( @@ -229,7 +229,7 @@ void test_vluxseg3ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_tu( @@ -244,7 +244,7 @@ void test_vluxseg3ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16mf2_tu(v0, v1, v2, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_tu( @@ -259,7 +259,7 @@ void test_vluxseg3ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_tu( @@ -274,7 +274,7 @@ void test_vluxseg3ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_tu( @@ -289,7 +289,7 @@ void test_vluxseg3ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_tu( @@ -304,7 +304,7 @@ void test_vluxseg3ei16_v_i32mf2_tu(vint32mf2_t *v0, 
vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_tu( @@ -319,7 +319,7 @@ void test_vluxseg3ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_tu( @@ -334,7 +334,7 @@ void test_vluxseg3ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_tu( @@ -349,7 +349,7 @@ void test_vluxseg3ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, 
vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_tu( @@ -364,7 +364,7 @@ void test_vluxseg3ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_tu( @@ -379,7 +379,7 @@ void test_vluxseg3ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_tu( @@ -394,7 +394,7 @@ void test_vluxseg3ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + 
return __riscv_vluxseg3ei16_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_tu( @@ -409,7 +409,7 @@ void test_vluxseg3ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_tu( @@ -424,7 +424,7 @@ void test_vluxseg3ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_tu( @@ -439,7 +439,7 @@ void test_vluxseg3ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_tu( @@ -454,7 +454,7 @@ void 
test_vluxseg3ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_tu( @@ -469,7 +469,7 @@ void test_vluxseg3ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_tu( @@ -484,7 +484,7 @@ void test_vluxseg3ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_tu( @@ -499,7 +499,7 @@ void test_vluxseg3ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32mf2_tu(vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_tu( @@ -514,7 +514,7 @@ void test_vluxseg3ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_tu( @@ -529,7 +529,7 @@ void test_vluxseg3ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_tu( @@ -544,7 +544,7 @@ void test_vluxseg3ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vluxseg3ei16_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_tu( @@ -559,7 +559,7 @@ void test_vluxseg3ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_tum( @@ -574,7 +574,7 @@ void test_vluxseg3ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_tum( @@ -589,7 +589,7 @@ void test_vluxseg3ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, 
vl); + return __riscv_vluxseg3ei16_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_tum( @@ -604,7 +604,7 @@ void test_vluxseg3ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_tum( @@ -619,7 +619,7 @@ void test_vluxseg3ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vluxseg3ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32mf2_tum(v0, v1, 
v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_tum( @@ -649,7 +649,7 @@ void test_vluxseg3ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_tum( @@ -664,7 +664,7 @@ void test_vluxseg3ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_tum( @@ -679,7 +679,7 @@ void test_vluxseg3ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_tum( @@ -694,7 +694,7 @@ void test_vluxseg3ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_tum( @@ -709,7 +709,7 @@ void test_vluxseg3ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_tum( @@ -724,7 +724,7 @@ void test_vluxseg3ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_tum( @@ -739,7 +739,7 @@ void 
test_vluxseg3ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_tum( @@ -754,7 +754,7 @@ void test_vluxseg3ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_tum( @@ -769,7 +769,7 @@ void test_vluxseg3ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_tum( @@ -784,7 +784,7 @@ void test_vluxseg3ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_tum( @@ -799,7 +799,7 @@ void test_vluxseg3ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_tum( @@ -814,7 +814,7 @@ void test_vluxseg3ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_tum( @@ -829,7 +829,7 @@ void test_vluxseg3ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, 
vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vluxseg3ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_tum( @@ -859,7 +859,7 @@ void test_vluxseg3ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_tum( @@ -874,7 +874,7 @@ void test_vluxseg3ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const 
int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_tum( @@ -889,7 +889,7 @@ void test_vluxseg3ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_tum( @@ -904,7 +904,7 @@ void test_vluxseg3ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_tum( @@ -919,7 +919,7 @@ void test_vluxseg3ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf8_tum(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_tum( @@ -934,7 +934,7 @@ void test_vluxseg3ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vluxseg3ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_tum( @@ -964,7 +964,7 @@ void test_vluxseg3ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vluxseg3ei16_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_tum( @@ -979,7 +979,7 @@ void test_vluxseg3ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_tum( @@ -994,7 +994,7 @@ void test_vluxseg3ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_tum( @@ -1009,7 +1009,7 @@ void test_vluxseg3ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_tum( @@ -1024,7 +1024,7 @@ void test_vluxseg3ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_tum( @@ -1039,7 +1039,7 @@ void test_vluxseg3ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vluxseg3ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg3ei16_v_u32m1_tum( @@ -1069,7 +1069,7 @@ void test_vluxseg3ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_tum( @@ -1084,7 +1084,7 @@ void test_vluxseg3ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_tum( @@ -1099,7 +1099,7 @@ void test_vluxseg3ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_tum( @@ -1114,7 +1114,7 @@ void 
test_vluxseg3ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_tumu( @@ -1129,7 +1129,7 @@ void test_vluxseg3ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_tumu( @@ -1144,7 +1144,7 @@ void test_vluxseg3ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void 
test_vluxseg3ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_tumu( @@ -1174,7 +1174,7 @@ void test_vluxseg3ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_tumu( @@ -1189,7 +1189,7 @@ void test_vluxseg3ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_tumu( @@ -1204,7 +1204,7 @@ void 
test_vluxseg3ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_tumu( @@ -1219,7 +1219,7 @@ void test_vluxseg3ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_tumu( @@ -1234,7 +1234,7 @@ void test_vluxseg3ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_tumu( @@ -1249,7 +1249,7 @@ void test_vluxseg3ei16_v_f64m1_tumu(vfloat64m1_t *v0, 
vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_tumu( @@ -1264,7 +1264,7 @@ void test_vluxseg3ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_tumu( @@ -1279,7 +1279,7 @@ void test_vluxseg3ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_tumu( @@ -1294,7 +1294,7 @@ void test_vluxseg3ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_tumu( @@ -1309,7 +1309,7 @@ void test_vluxseg3ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_tumu( @@ -1324,7 +1324,7 @@ void test_vluxseg3ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_tumu( @@ -1339,7 +1339,7 @@ void test_vluxseg3ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t 
maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vluxseg3ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vluxseg3ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_tumu( @@ -1384,7 +1384,7 @@ void test_vluxseg3ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const 
int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_tumu( @@ -1399,7 +1399,7 @@ void test_vluxseg3ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_tumu( @@ -1414,7 +1414,7 @@ void test_vluxseg3ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_tumu( @@ -1429,7 +1429,7 @@ void test_vluxseg3ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return 
vluxseg3ei16_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_tumu( @@ -1444,7 +1444,7 @@ void test_vluxseg3ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_tumu( @@ -1459,7 +1459,7 @@ void test_vluxseg3ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_tumu( @@ -1474,7 +1474,7 @@ void test_vluxseg3ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_tumu( @@ -1489,7 +1489,7 @@ void test_vluxseg3ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_tumu( @@ -1504,7 +1504,7 @@ void test_vluxseg3ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_tumu( @@ -1519,7 +1519,7 @@ void test_vluxseg3ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8m1_tumu(v0, 
v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_tumu( @@ -1534,7 +1534,7 @@ void test_vluxseg3ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_tumu( @@ -1549,7 +1549,7 @@ void test_vluxseg3ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_tumu( @@ -1564,7 +1564,7 @@ void test_vluxseg3ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vluxseg3ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_tumu( @@ -1594,7 +1594,7 @@ void test_vluxseg3ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_tumu( @@ -1609,7 +1609,7 @@ void test_vluxseg3ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg3ei16_v_u32m1_tumu( @@ -1624,7 +1624,7 @@ void test_vluxseg3ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_tumu( @@ -1639,7 +1639,7 @@ void test_vluxseg3ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_tumu( @@ -1654,7 +1654,7 @@ void test_vluxseg3ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_tumu( @@ -1669,7 +1669,7 @@ void 
test_vluxseg3ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_mu( @@ -1684,7 +1684,7 @@ void test_vluxseg3ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_mu( @@ -1699,7 +1699,7 @@ void test_vluxseg3ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_mu( @@ -1714,7 +1714,7 @@ void test_vluxseg3ei16_v_f16mf2_mu(vfloat16mf2_t 
*v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_mu( @@ -1729,7 +1729,7 @@ void test_vluxseg3ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_mu( @@ -1744,7 +1744,7 @@ void test_vluxseg3ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_mu( @@ -1759,7 +1759,7 @@ void test_vluxseg3ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // 
void test_vluxseg3ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_mu( @@ -1774,7 +1774,7 @@ void test_vluxseg3ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_mu( @@ -1789,7 +1789,7 @@ void test_vluxseg3ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_mu( @@ -1804,7 +1804,7 @@ void test_vluxseg3ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, 
vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_mu( @@ -1819,7 +1819,7 @@ void test_vluxseg3ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_mu( @@ -1834,7 +1834,7 @@ void test_vluxseg3ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_mu( @@ -1849,7 +1849,7 @@ void test_vluxseg3ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, 
const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_mu( @@ -1864,7 +1864,7 @@ void test_vluxseg3ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_mu( @@ -1879,7 +1879,7 @@ void test_vluxseg3ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_mu( @@ -1894,7 +1894,7 @@ void test_vluxseg3ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_mu( @@ -1909,7 +1909,7 @@ void test_vluxseg3ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_mu( @@ -1924,7 +1924,7 @@ void test_vluxseg3ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_mu( @@ -1939,7 +1939,7 @@ void test_vluxseg3ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i16m2_mu(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_mu( @@ -1954,7 +1954,7 @@ void test_vluxseg3ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_mu( @@ -1969,7 +1969,7 @@ void test_vluxseg3ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_mu( @@ -1984,7 +1984,7 @@ void test_vluxseg3ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg3ei16_v_i64m1_mu( @@ -1999,7 +1999,7 @@ void test_vluxseg3ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_mu( @@ -2014,7 +2014,7 @@ void test_vluxseg3ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_mu( @@ -2029,7 +2029,7 @@ void test_vluxseg3ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_mu( @@ -2044,7 +2044,7 @@ void test_vluxseg3ei16_v_u8mf8_mu(vuint8mf8_t 
*v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_mu( @@ -2059,7 +2059,7 @@ void test_vluxseg3ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_mu( @@ -2074,7 +2074,7 @@ void test_vluxseg3ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_mu( @@ -2089,7 +2089,7 @@ void test_vluxseg3ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_mu( @@ -2104,7 +2104,7 @@ void test_vluxseg3ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_mu( @@ -2119,7 +2119,7 @@ void test_vluxseg3ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_mu( @@ -2134,7 +2134,7 @@ void test_vluxseg3ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t 
*v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_mu( @@ -2149,7 +2149,7 @@ void test_vluxseg3ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_mu( @@ -2164,7 +2164,7 @@ void test_vluxseg3ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_mu( @@ -2179,7 +2179,7 @@ void test_vluxseg3ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t 
maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_mu( @@ -2194,7 +2194,7 @@ void test_vluxseg3ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_mu( @@ -2209,7 +2209,7 @@ void test_vluxseg3ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_mu( @@ -2224,6 +2224,6 @@ void test_vluxseg3ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return 
vluxseg3ei16_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei16_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei32.c index 29e59a95e5ac..1d3393b0fa5e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei32.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_tu( @@ -34,7 +34,7 @@ void test_vluxseg3ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_tu( @@ -49,7 +49,7 @@ void test_vluxseg3ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16m1_tu(vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_tu( @@ -64,7 +64,7 @@ void test_vluxseg3ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_tu( @@ -79,7 +79,7 @@ void test_vluxseg3ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_tu( @@ -94,7 +94,7 @@ void test_vluxseg3ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t 
vl) { - return vluxseg3ei32_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_tu( @@ -109,7 +109,7 @@ void test_vluxseg3ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_tu( @@ -124,7 +124,7 @@ void test_vluxseg3ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_tu( @@ -139,7 +139,7 @@ void test_vluxseg3ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f64m2_tu(v0, v1, v2, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_tu( @@ -154,7 +154,7 @@ void test_vluxseg3ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_tu( @@ -169,7 +169,7 @@ void test_vluxseg3ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_tu( @@ -184,7 +184,7 @@ void test_vluxseg3ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_tu( @@ -199,7 +199,7 @@ void test_vluxseg3ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_tu( @@ -214,7 +214,7 @@ void test_vluxseg3ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_tu( @@ -229,7 +229,7 @@ void test_vluxseg3ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_tu( @@ -244,7 +244,7 @@ void test_vluxseg3ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, 
const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_tu( @@ -259,7 +259,7 @@ void test_vluxseg3ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_tu( @@ -274,7 +274,7 @@ void test_vluxseg3ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_tu( @@ -289,7 +289,7 @@ void test_vluxseg3ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vluxseg3ei32_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_tu( @@ -304,7 +304,7 @@ void test_vluxseg3ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_tu( @@ -319,7 +319,7 @@ void test_vluxseg3ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_tu( @@ -334,7 +334,7 @@ void test_vluxseg3ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_tu( @@ -349,7 +349,7 @@ void 
test_vluxseg3ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_tu( @@ -364,7 +364,7 @@ void test_vluxseg3ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_tu( @@ -379,7 +379,7 @@ void test_vluxseg3ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_tu( @@ -394,7 +394,7 @@ void test_vluxseg3ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t 
*v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_tu( @@ -409,7 +409,7 @@ void test_vluxseg3ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_tu( @@ -424,7 +424,7 @@ void test_vluxseg3ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_tu( @@ -439,7 +439,7 @@ void test_vluxseg3ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf4_tu(v0, v1, v2, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_tu( @@ -454,7 +454,7 @@ void test_vluxseg3ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_tu( @@ -469,7 +469,7 @@ void test_vluxseg3ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_tu( @@ -484,7 +484,7 @@ void test_vluxseg3ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_tu( @@ -499,7 +499,7 @@ void test_vluxseg3ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_tu( @@ -514,7 +514,7 @@ void test_vluxseg3ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_tu( @@ -529,7 +529,7 @@ void test_vluxseg3ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_tu( @@ -544,7 +544,7 @@ void test_vluxseg3ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: 
ret void // void test_vluxseg3ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_tu( @@ -559,7 +559,7 @@ void test_vluxseg3ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_tum( @@ -574,7 +574,7 @@ void test_vluxseg3ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_tum( @@ -589,7 +589,7 @@ void test_vluxseg3ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, 
vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_tum( @@ -604,7 +604,7 @@ void test_vluxseg3ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_tum( @@ -619,7 +619,7 @@ void test_vluxseg3ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vluxseg3ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, 
vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_tum( @@ -649,7 +649,7 @@ void test_vluxseg3ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_tum( @@ -664,7 +664,7 @@ void test_vluxseg3ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_tum( @@ -679,7 +679,7 @@ void test_vluxseg3ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, 
size_t vl) { - return vluxseg3ei32_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_tum( @@ -694,7 +694,7 @@ void test_vluxseg3ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_tum( @@ -709,7 +709,7 @@ void test_vluxseg3ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_tum( @@ -724,7 +724,7 @@ void test_vluxseg3ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_tum( @@ -739,7 +739,7 @@ void test_vluxseg3ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_tum( @@ -754,7 +754,7 @@ void test_vluxseg3ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_tum( @@ -769,7 +769,7 @@ void test_vluxseg3ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_tum( @@ -784,7 +784,7 @@ void test_vluxseg3ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_tum( @@ -799,7 +799,7 @@ void test_vluxseg3ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_tum( @@ -814,7 +814,7 @@ void test_vluxseg3ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_tum( @@ 
-829,7 +829,7 @@ void test_vluxseg3ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vluxseg3ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_tum( @@ -859,7 +859,7 @@ void test_vluxseg3ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_tum( @@ -874,7 +874,7 @@ void test_vluxseg3ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, 
vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_tum( @@ -889,7 +889,7 @@ void test_vluxseg3ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_tum( @@ -904,7 +904,7 @@ void test_vluxseg3ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_tum( @@ -919,7 +919,7 @@ void test_vluxseg3ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf8_tum(vuint8mf8_t *v0, 
vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_tum( @@ -934,7 +934,7 @@ void test_vluxseg3ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vluxseg3ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_tum( @@ -964,7 +964,7 @@ void test_vluxseg3ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t 
maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_tum( @@ -979,7 +979,7 @@ void test_vluxseg3ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_tum( @@ -994,7 +994,7 @@ void test_vluxseg3ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_tum( @@ -1009,7 +1009,7 @@ void test_vluxseg3ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, 
size_t vl) { - return vluxseg3ei32_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_tum( @@ -1024,7 +1024,7 @@ void test_vluxseg3ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_tum( @@ -1039,7 +1039,7 @@ void test_vluxseg3ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vluxseg3ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32mf2_tum(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_tum( @@ -1069,7 +1069,7 @@ void test_vluxseg3ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_tum( @@ -1084,7 +1084,7 @@ void test_vluxseg3ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_tum( @@ -1099,7 +1099,7 @@ void test_vluxseg3ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vluxseg3ei32_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_tum( @@ -1114,7 +1114,7 @@ void test_vluxseg3ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_tumu( @@ -1129,7 +1129,7 @@ void test_vluxseg3ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_tumu( @@ -1144,7 +1144,7 @@ void test_vluxseg3ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vluxseg3ei32_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void test_vluxseg3ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_tumu( @@ -1174,7 +1174,7 @@ void test_vluxseg3ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_tumu( @@ -1189,7 +1189,7 @@ void test_vluxseg3ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32mf2_tumu(v0, 
v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_tumu( @@ -1204,7 +1204,7 @@ void test_vluxseg3ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_tumu( @@ -1219,7 +1219,7 @@ void test_vluxseg3ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_tumu( @@ -1234,7 +1234,7 @@ void test_vluxseg3ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_tumu( @@ -1249,7 +1249,7 @@ void test_vluxseg3ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_tumu( @@ -1264,7 +1264,7 @@ void test_vluxseg3ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_tumu( @@ -1279,7 +1279,7 @@ void test_vluxseg3ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_tumu( 
@@ -1294,7 +1294,7 @@ void test_vluxseg3ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_tumu( @@ -1309,7 +1309,7 @@ void test_vluxseg3ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_tumu( @@ -1324,7 +1324,7 @@ void test_vluxseg3ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_tumu( @@ -1339,7 +1339,7 @@ void test_vluxseg3ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, // 
CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vluxseg3ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vluxseg3ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_tumu( @@ -1384,7 +1384,7 @@ void test_vluxseg3ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_tumu( @@ -1399,7 +1399,7 @@ void test_vluxseg3ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_tumu( @@ -1414,7 +1414,7 @@ void test_vluxseg3ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_tumu( @@ -1429,7 +1429,7 @@ void test_vluxseg3ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t 
*v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_tumu( @@ -1444,7 +1444,7 @@ void test_vluxseg3ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_tumu( @@ -1459,7 +1459,7 @@ void test_vluxseg3ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_tumu( @@ -1474,7 +1474,7 @@ void test_vluxseg3ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t 
maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_tumu( @@ -1489,7 +1489,7 @@ void test_vluxseg3ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_tumu( @@ -1504,7 +1504,7 @@ void test_vluxseg3ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_tumu( @@ -1519,7 +1519,7 @@ void test_vluxseg3ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return 
vluxseg3ei32_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_tumu( @@ -1534,7 +1534,7 @@ void test_vluxseg3ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_tumu( @@ -1549,7 +1549,7 @@ void test_vluxseg3ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_tumu( @@ -1564,7 +1564,7 @@ void test_vluxseg3ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vluxseg3ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_tumu( @@ -1594,7 +1594,7 @@ void test_vluxseg3ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_tumu( @@ -1609,7 +1609,7 @@ void test_vluxseg3ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vluxseg3ei32_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_tumu( @@ -1624,7 +1624,7 @@ void test_vluxseg3ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_tumu( @@ -1639,7 +1639,7 @@ void test_vluxseg3ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_tumu( @@ -1654,7 +1654,7 @@ void test_vluxseg3ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u64m1_tumu(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_tumu( @@ -1669,7 +1669,7 @@ void test_vluxseg3ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_mu( @@ -1684,7 +1684,7 @@ void test_vluxseg3ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_mu( @@ -1699,7 +1699,7 @@ void test_vluxseg3ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_mu( @@ -1714,7 +1714,7 @@ void test_vluxseg3ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_mu( @@ -1729,7 +1729,7 @@ void test_vluxseg3ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_mu( @@ -1744,7 +1744,7 @@ void test_vluxseg3ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg3ei32_v_f32m1_mu( @@ -1759,7 +1759,7 @@ void test_vluxseg3ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_mu( @@ -1774,7 +1774,7 @@ void test_vluxseg3ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_mu( @@ -1789,7 +1789,7 @@ void test_vluxseg3ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_mu( @@ -1804,7 +1804,7 @@ void 
test_vluxseg3ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_mu( @@ -1819,7 +1819,7 @@ void test_vluxseg3ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_mu( @@ -1834,7 +1834,7 @@ void test_vluxseg3ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_mu( @@ -1849,7 +1849,7 @@ void test_vluxseg3ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret 
void // void test_vluxseg3ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_mu( @@ -1864,7 +1864,7 @@ void test_vluxseg3ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_mu( @@ -1879,7 +1879,7 @@ void test_vluxseg3ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_mu( @@ -1894,7 +1894,7 @@ void test_vluxseg3ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, 
vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_mu( @@ -1909,7 +1909,7 @@ void test_vluxseg3ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_mu( @@ -1924,7 +1924,7 @@ void test_vluxseg3ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_mu( @@ -1939,7 +1939,7 @@ void test_vluxseg3ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t 
vl) { - return vluxseg3ei32_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_mu( @@ -1954,7 +1954,7 @@ void test_vluxseg3ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_mu( @@ -1969,7 +1969,7 @@ void test_vluxseg3ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_mu( @@ -1984,7 +1984,7 @@ void test_vluxseg3ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); + return __riscv_vluxseg3ei32_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_mu( @@ -1999,7 +1999,7 @@ void test_vluxseg3ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_mu( @@ -2014,7 +2014,7 @@ void test_vluxseg3ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_mu( @@ -2029,7 +2029,7 @@ void test_vluxseg3ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_mu( @@ -2044,7 +2044,7 @@ void test_vluxseg3ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_mu( @@ -2059,7 +2059,7 @@ void test_vluxseg3ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_mu( @@ -2074,7 +2074,7 @@ void test_vluxseg3ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_mu( @@ -2089,7 +2089,7 @@ 
void test_vluxseg3ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_mu( @@ -2104,7 +2104,7 @@ void test_vluxseg3ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_mu( @@ -2119,7 +2119,7 @@ void test_vluxseg3ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_mu( @@ -2134,7 +2134,7 @@ void test_vluxseg3ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, 
vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_mu( @@ -2149,7 +2149,7 @@ void test_vluxseg3ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_mu( @@ -2164,7 +2164,7 @@ void test_vluxseg3ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_mu( @@ -2179,7 +2179,7 @@ void test_vluxseg3ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_mu( @@ -2194,7 +2194,7 @@ void test_vluxseg3ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_mu( @@ -2209,7 +2209,7 @@ void test_vluxseg3ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_mu( @@ -2224,6 +2224,6 @@ void test_vluxseg3ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t 
mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei32_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei64.c index da347515167d..37d93e95900e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei64.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_tu( @@ -34,7 +34,7 @@ void test_vluxseg3ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_tu( @@ -49,7 +49,7 @@ void 
test_vluxseg3ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_tu( @@ -64,7 +64,7 @@ void test_vluxseg3ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_tu( @@ -79,7 +79,7 @@ void test_vluxseg3ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_tu( @@ -94,7 +94,7 @@ void test_vluxseg3ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32m1_tu(vfloat32m1_t *v0, 
vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_tu( @@ -109,7 +109,7 @@ void test_vluxseg3ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_tu( @@ -124,7 +124,7 @@ void test_vluxseg3ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_tu( @@ -139,7 +139,7 @@ void test_vluxseg3ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - 
return vluxseg3ei64_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_tu( @@ -154,7 +154,7 @@ void test_vluxseg3ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_tu( @@ -169,7 +169,7 @@ void test_vluxseg3ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_tu( @@ -184,7 +184,7 @@ void test_vluxseg3ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_tu( @@ -199,7 +199,7 @@ void test_vluxseg3ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_tu( @@ -214,7 +214,7 @@ void test_vluxseg3ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_tu( @@ -229,7 +229,7 @@ void test_vluxseg3ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_tu( @@ -244,7 +244,7 @@ void test_vluxseg3ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void 
// void test_vluxseg3ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_tu( @@ -259,7 +259,7 @@ void test_vluxseg3ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_tu( @@ -274,7 +274,7 @@ void test_vluxseg3ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_tu( @@ -289,7 +289,7 @@ void test_vluxseg3ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t 
bindex, size_t vl) { - return vluxseg3ei64_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_tu( @@ -304,7 +304,7 @@ void test_vluxseg3ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_tu( @@ -319,7 +319,7 @@ void test_vluxseg3ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_tu( @@ -334,7 +334,7 @@ void test_vluxseg3ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_tu( @@ -349,7 +349,7 @@ void test_vluxseg3ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_tu( @@ -364,7 +364,7 @@ void test_vluxseg3ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_tu( @@ -379,7 +379,7 @@ void test_vluxseg3ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_tu( @@ -394,7 +394,7 @@ void test_vluxseg3ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t 
// CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_tu( @@ -409,7 +409,7 @@ void test_vluxseg3ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_tu( @@ -424,7 +424,7 @@ void test_vluxseg3ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_tu( @@ -439,7 +439,7 @@ void test_vluxseg3ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, 
vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_tu( @@ -454,7 +454,7 @@ void test_vluxseg3ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_tu( @@ -469,7 +469,7 @@ void test_vluxseg3ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_tu( @@ -484,7 +484,7 @@ void test_vluxseg3ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); + return __riscv_vluxseg3ei64_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_tu( @@ -499,7 +499,7 @@ void test_vluxseg3ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_tu( @@ -514,7 +514,7 @@ void test_vluxseg3ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_tu( @@ -529,7 +529,7 @@ void test_vluxseg3ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_tum( @@ 
-544,7 +544,7 @@ void test_vluxseg3ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_tum( @@ -559,7 +559,7 @@ void test_vluxseg3ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_tum( @@ -574,7 +574,7 @@ void test_vluxseg3ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_tum( @@ -589,7 +589,7 @@ void 
test_vluxseg3ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_tum( @@ -604,7 +604,7 @@ void test_vluxseg3ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_tum( @@ -619,7 +619,7 @@ void test_vluxseg3ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_tum( @@ -634,7 +634,7 @@ void test_vluxseg3ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t 
*v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_tum( @@ -649,7 +649,7 @@ void test_vluxseg3ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_tum( @@ -664,7 +664,7 @@ void test_vluxseg3ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_tum( @@ -679,7 +679,7 @@ void test_vluxseg3ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_tum( @@ -694,7 +694,7 @@ void test_vluxseg3ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_tum( @@ -709,7 +709,7 @@ void test_vluxseg3ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_tum( @@ -724,7 +724,7 @@ void test_vluxseg3ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t 
maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_tum( @@ -739,7 +739,7 @@ void test_vluxseg3ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_tum( @@ -754,7 +754,7 @@ void test_vluxseg3ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_tum( @@ -769,7 +769,7 @@ void test_vluxseg3ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, 
vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_tum( @@ -784,7 +784,7 @@ void test_vluxseg3ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_tum( @@ -799,7 +799,7 @@ void test_vluxseg3ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_tum( @@ -814,7 +814,7 @@ void test_vluxseg3ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m1_tum(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_tum( @@ -829,7 +829,7 @@ void test_vluxseg3ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_tum( @@ -844,7 +844,7 @@ void test_vluxseg3ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_tum( @@ -859,7 +859,7 @@ void test_vluxseg3ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i64m2_tum(v0, 
v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_tum( @@ -874,7 +874,7 @@ void test_vluxseg3ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_tum( @@ -889,7 +889,7 @@ void test_vluxseg3ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_tum( @@ -904,7 +904,7 @@ void test_vluxseg3ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_tum( @@ -919,7 +919,7 @@ void test_vluxseg3ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_tum( @@ -934,7 +934,7 @@ void test_vluxseg3ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_tum( @@ -949,7 +949,7 @@ void test_vluxseg3ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_tum( @@ -964,7 +964,7 @@ void 
test_vluxseg3ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_tum( @@ -979,7 +979,7 @@ void test_vluxseg3ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_tum( @@ -994,7 +994,7 @@ void test_vluxseg3ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_tum( @@ -1009,7 +1009,7 @@ void test_vluxseg3ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, 
vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_tum( @@ -1024,7 +1024,7 @@ void test_vluxseg3ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_tum( @@ -1039,7 +1039,7 @@ void test_vluxseg3ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_tum( @@ -1054,7 +1054,7 @@ void test_vluxseg3ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_tumu( @@ -1069,7 +1069,7 @@ void test_vluxseg3ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_tumu( @@ -1084,7 +1084,7 @@ void test_vluxseg3ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_tumu( @@ -1099,7 +1099,7 @@ void test_vluxseg3ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_tumu( @@ -1114,7 +1114,7 @@ void test_vluxseg3ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_tumu( @@ -1129,7 +1129,7 @@ void test_vluxseg3ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_tumu( @@ -1144,7 +1144,7 @@ void test_vluxseg3ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_tumu( @@ -1159,7 +1159,7 @@ void test_vluxseg3ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_tumu( @@ -1174,7 +1174,7 @@ void test_vluxseg3ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_tumu( @@ -1189,7 +1189,7 @@ void test_vluxseg3ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f64m2_tumu(vfloat64m2_t *v0, 
vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_tumu( @@ -1204,7 +1204,7 @@ void test_vluxseg3ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_tumu( @@ -1219,7 +1219,7 @@ void test_vluxseg3ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_tumu( @@ -1234,7 +1234,7 @@ void test_vluxseg3ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, 
vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_tumu( @@ -1249,7 +1249,7 @@ void test_vluxseg3ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_tumu( @@ -1264,7 +1264,7 @@ void test_vluxseg3ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_tumu( @@ -1279,7 +1279,7 @@ void test_vluxseg3ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t 
bindex, size_t vl) { - return vluxseg3ei64_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_tumu( @@ -1294,7 +1294,7 @@ void test_vluxseg3ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_tumu( @@ -1309,7 +1309,7 @@ void test_vluxseg3ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_tumu( @@ -1324,7 +1324,7 @@ void test_vluxseg3ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i32mf2_tumu(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_tumu( @@ -1339,7 +1339,7 @@ void test_vluxseg3ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_tumu( @@ -1354,7 +1354,7 @@ void test_vluxseg3ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_tumu( @@ -1369,7 +1369,7 @@ void test_vluxseg3ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vluxseg3ei64_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_tumu( @@ -1384,7 +1384,7 @@ void test_vluxseg3ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_tumu( @@ -1399,7 +1399,7 @@ void test_vluxseg3ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_tumu( @@ -1414,7 +1414,7 @@ void test_vluxseg3ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_tumu( @@ -1429,7 +1429,7 @@ void test_vluxseg3ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_tumu( @@ -1444,7 +1444,7 @@ void test_vluxseg3ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_tumu( @@ -1459,7 +1459,7 @@ void test_vluxseg3ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg3ei64_v_u16mf2_tumu( @@ -1474,7 +1474,7 @@ void test_vluxseg3ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_tumu( @@ -1489,7 +1489,7 @@ void test_vluxseg3ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_tumu( @@ -1504,7 +1504,7 @@ void test_vluxseg3ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_tumu( @@ -1519,7 +1519,7 @@ 
void test_vluxseg3ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_tumu( @@ -1534,7 +1534,7 @@ void test_vluxseg3ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_tumu( @@ -1549,7 +1549,7 @@ void test_vluxseg3ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_tumu( @@ -1564,7 +1564,7 @@ void test_vluxseg3ei64_v_u32m2_tumu(vuint32m2_t *v0, 
vuint32m2_t *v1, vuint32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_tumu( @@ -1579,7 +1579,7 @@ void test_vluxseg3ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_mu( @@ -1594,7 +1594,7 @@ void test_vluxseg3ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_mu( @@ -1609,7 +1609,7 @@ void test_vluxseg3ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void 
// void test_vluxseg3ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_mu( @@ -1624,7 +1624,7 @@ void test_vluxseg3ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_mu( @@ -1639,7 +1639,7 @@ void test_vluxseg3ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_mu( @@ -1654,7 +1654,7 @@ void test_vluxseg3ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32mf2_mu(vfloat32mf2_t *v0, 
vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_mu( @@ -1669,7 +1669,7 @@ void test_vluxseg3ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_mu( @@ -1684,7 +1684,7 @@ void test_vluxseg3ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_mu( @@ -1699,7 +1699,7 @@ void test_vluxseg3ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t 
maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_mu( @@ -1714,7 +1714,7 @@ void test_vluxseg3ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_mu( @@ -1729,7 +1729,7 @@ void test_vluxseg3ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_mu( @@ -1744,7 +1744,7 @@ void test_vluxseg3ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t 
bindex, size_t vl) { - return vluxseg3ei64_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_mu( @@ -1759,7 +1759,7 @@ void test_vluxseg3ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_mu( @@ -1774,7 +1774,7 @@ void test_vluxseg3ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_mu( @@ -1789,7 +1789,7 @@ void test_vluxseg3ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); + return __riscv_vluxseg3ei64_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_mu( @@ -1804,7 +1804,7 @@ void test_vluxseg3ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_mu( @@ -1819,7 +1819,7 @@ void test_vluxseg3ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_mu( @@ -1834,7 +1834,7 @@ void test_vluxseg3ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_mu( @@ -1849,7 +1849,7 @@ void test_vluxseg3ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_mu( @@ -1864,7 +1864,7 @@ void test_vluxseg3ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_mu( @@ -1879,7 +1879,7 @@ void test_vluxseg3ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_mu( @@ -1894,7 
+1894,7 @@ void test_vluxseg3ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_mu( @@ -1909,7 +1909,7 @@ void test_vluxseg3ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_mu( @@ -1924,7 +1924,7 @@ void test_vluxseg3ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_mu( @@ -1939,7 +1939,7 @@ void test_vluxseg3ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_mu( @@ -1954,7 +1954,7 @@ void test_vluxseg3ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_mu( @@ -1969,7 +1969,7 @@ void test_vluxseg3ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_mu( @@ -1984,7 +1984,7 @@ void test_vluxseg3ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_mu( @@ -1999,7 +1999,7 @@ void test_vluxseg3ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_mu( @@ -2014,7 +2014,7 @@ void test_vluxseg3ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_mu( @@ -2029,7 +2029,7 @@ void test_vluxseg3ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t 
maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_mu( @@ -2044,7 +2044,7 @@ void test_vluxseg3ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_mu( @@ -2059,7 +2059,7 @@ void test_vluxseg3ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_mu( @@ -2074,7 +2074,7 @@ void test_vluxseg3ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, 
size_t vl) { - return vluxseg3ei64_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_mu( @@ -2089,7 +2089,7 @@ void test_vluxseg3ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_mu( @@ -2104,6 +2104,6 @@ void test_vluxseg3ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei64_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei8.c index 4efd22fdb400..4126d9403ad1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg3ei8.c @@ -19,7 +19,7 @@ // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_tu( @@ -34,7 +34,7 @@ void test_vluxseg3ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_tu( @@ -49,7 +49,7 @@ void test_vluxseg3ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_tu( @@ -64,7 +64,7 @@ void test_vluxseg3ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, 
const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_tu( @@ -79,7 +79,7 @@ void test_vluxseg3ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_tu( @@ -94,7 +94,7 @@ void test_vluxseg3ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_tu( @@ -109,7 +109,7 @@ void test_vluxseg3ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vluxseg3ei8_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_tu( @@ -124,7 +124,7 @@ void test_vluxseg3ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_tu( @@ -139,7 +139,7 @@ void test_vluxseg3ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_tu( @@ -154,7 +154,7 @@ void test_vluxseg3ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_tu( @@ -169,7 +169,7 @@ void 
test_vluxseg3ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_tu( @@ -184,7 +184,7 @@ void test_vluxseg3ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_tu( @@ -199,7 +199,7 @@ void test_vluxseg3ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_tu( @@ -214,7 +214,7 @@ void test_vluxseg3ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t 
maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_tu( @@ -229,7 +229,7 @@ void test_vluxseg3ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_tu( @@ -244,7 +244,7 @@ void test_vluxseg3ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_tu( @@ -259,7 +259,7 @@ void test_vluxseg3ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vluxseg3ei8_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_tu( @@ -274,7 +274,7 @@ void test_vluxseg3ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_tu( @@ -289,7 +289,7 @@ void test_vluxseg3ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_tu( @@ -304,7 +304,7 @@ void test_vluxseg3ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_tu( @@ -319,7 +319,7 @@ void 
test_vluxseg3ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_tu( @@ -334,7 +334,7 @@ void test_vluxseg3ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_tu( @@ -349,7 +349,7 @@ void test_vluxseg3ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_tu( @@ -364,7 +364,7 @@ void test_vluxseg3ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t 
maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_tu( @@ -379,7 +379,7 @@ void test_vluxseg3ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_tu( @@ -394,7 +394,7 @@ void test_vluxseg3ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_tu( @@ -409,7 +409,7 @@ void test_vluxseg3ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); + return __riscv_vluxseg3ei8_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_tu( @@ -424,7 +424,7 @@ void test_vluxseg3ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_tu( @@ -439,7 +439,7 @@ void test_vluxseg3ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_tu( @@ -454,7 +454,7 @@ void test_vluxseg3ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_tu( @@ -469,7 
+469,7 @@ void test_vluxseg3ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_tu( @@ -484,7 +484,7 @@ void test_vluxseg3ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_tu( @@ -499,7 +499,7 @@ void test_vluxseg3ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_tu( @@ -514,7 +514,7 @@ void test_vluxseg3ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32m1_tu(vuint32m1_t *v0, 
vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_tu( @@ -529,7 +529,7 @@ void test_vluxseg3ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_tu( @@ -544,7 +544,7 @@ void test_vluxseg3ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_tu( @@ -559,7 +559,7 @@ void test_vluxseg3ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return 
vluxseg3ei8_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_tum( @@ -574,7 +574,7 @@ void test_vluxseg3ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_tum( @@ -589,7 +589,7 @@ void test_vluxseg3ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_tum( @@ -604,7 +604,7 @@ void test_vluxseg3ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_tum( @@ -619,7 +619,7 @@ void test_vluxseg3ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vluxseg3ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_tum( @@ -649,7 +649,7 @@ void test_vluxseg3ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vluxseg3ei8_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_tum( @@ -664,7 +664,7 @@ void test_vluxseg3ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_tum( @@ -679,7 +679,7 @@ void test_vluxseg3ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_tum( @@ -694,7 +694,7 @@ void test_vluxseg3ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_tum( @@ -709,7 +709,7 @@ void test_vluxseg3ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_tum( @@ -724,7 +724,7 @@ void test_vluxseg3ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_tum( @@ -739,7 +739,7 @@ void test_vluxseg3ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_tum( @@ -754,7 +754,7 @@ void 
test_vluxseg3ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_tum( @@ -769,7 +769,7 @@ void test_vluxseg3ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_tum( @@ -784,7 +784,7 @@ void test_vluxseg3ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vb // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_tum( @@ -799,7 +799,7 @@ void test_vluxseg3ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_tum( @@ -814,7 +814,7 @@ void test_vluxseg3ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_tum( @@ -829,7 +829,7 @@ void test_vluxseg3ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vluxseg3ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, 
vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_tum( @@ -859,7 +859,7 @@ void test_vluxseg3ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_tum( @@ -874,7 +874,7 @@ void test_vluxseg3ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_tum( @@ -889,7 +889,7 @@ void test_vluxseg3ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, 
size_t vl) { - return vluxseg3ei8_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_tum( @@ -904,7 +904,7 @@ void test_vluxseg3ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_tum( @@ -919,7 +919,7 @@ void test_vluxseg3ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_tum( @@ -934,7 +934,7 @@ void test_vluxseg3ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, 
base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vluxseg3ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_tum( @@ -964,7 +964,7 @@ void test_vluxseg3ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_tum( @@ -979,7 +979,7 @@ void test_vluxseg3ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_tum( @@ -994,7 +994,7 @@ void test_vluxseg3ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_tum( @@ -1009,7 +1009,7 @@ void test_vluxseg3ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_tum( @@ -1024,7 +1024,7 @@ void test_vluxseg3ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_tum( @@ 
-1039,7 +1039,7 @@ void test_vluxseg3ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vluxseg3ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_tum( @@ -1069,7 +1069,7 @@ void test_vluxseg3ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_tum( @@ -1084,7 +1084,7 @@ void test_vluxseg3ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t 
*v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_tum( @@ -1099,7 +1099,7 @@ void test_vluxseg3ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_tum( @@ -1114,7 +1114,7 @@ void test_vluxseg3ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_tumu( @@ -1129,7 +1129,7 @@ void test_vluxseg3ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_tumu( @@ -1144,7 +1144,7 @@ void test_vluxseg3ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void test_vluxseg3ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_tumu( @@ -1174,7 +1174,7 @@ void test_vluxseg3ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void 
test_vluxseg3ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_tumu( @@ -1189,7 +1189,7 @@ void test_vluxseg3ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_tumu( @@ -1204,7 +1204,7 @@ void test_vluxseg3ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_tumu( @@ -1219,7 +1219,7 @@ void test_vluxseg3ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32m2_tumu(vfloat32m2_t *v0, 
vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_tumu( @@ -1234,7 +1234,7 @@ void test_vluxseg3ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_tumu( @@ -1249,7 +1249,7 @@ void test_vluxseg3ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_tumu( @@ -1264,7 +1264,7 @@ void test_vluxseg3ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t 
maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_tumu( @@ -1279,7 +1279,7 @@ void test_vluxseg3ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_tumu( @@ -1294,7 +1294,7 @@ void test_vluxseg3ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_tumu( @@ -1309,7 +1309,7 @@ void test_vluxseg3ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { 
- return vluxseg3ei8_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_tumu( @@ -1324,7 +1324,7 @@ void test_vluxseg3ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_tumu( @@ -1339,7 +1339,7 @@ void test_vluxseg3ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vluxseg3ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); + return __riscv_vluxseg3ei8_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vluxseg3ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_tumu( @@ -1384,7 +1384,7 @@ void test_vluxseg3ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_tumu( @@ -1399,7 +1399,7 @@ void test_vluxseg3ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_tumu( @@ -1414,7 +1414,7 @@ void test_vluxseg3ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_tumu( @@ -1429,7 +1429,7 @@ void test_vluxseg3ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_tumu( @@ -1444,7 +1444,7 @@ void test_vluxseg3ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_tumu( 
@@ -1459,7 +1459,7 @@ void test_vluxseg3ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_tumu( @@ -1474,7 +1474,7 @@ void test_vluxseg3ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_tumu( @@ -1489,7 +1489,7 @@ void test_vluxseg3ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_tumu( @@ -1504,7 +1504,7 @@ void test_vluxseg3ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t 
*v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_tumu( @@ -1519,7 +1519,7 @@ void test_vluxseg3ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_tumu( @@ -1534,7 +1534,7 @@ void test_vluxseg3ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_tumu( @@ -1549,7 +1549,7 @@ void test_vluxseg3ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16mf4_tumu(vuint16mf4_t *v0, 
vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_tumu( @@ -1564,7 +1564,7 @@ void test_vluxseg3ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vluxseg3ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_tumu( @@ -1594,7 +1594,7 @@ void test_vluxseg3ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, 
vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_tumu( @@ -1609,7 +1609,7 @@ void test_vluxseg3ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_tumu( @@ -1624,7 +1624,7 @@ void test_vluxseg3ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_tumu( @@ -1639,7 +1639,7 @@ void test_vluxseg3ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t 
maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_tumu( @@ -1654,7 +1654,7 @@ void test_vluxseg3ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_tumu( @@ -1669,7 +1669,7 @@ void test_vluxseg3ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_mu( @@ -1684,7 +1684,7 @@ void test_vluxseg3ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { 
- return vluxseg3ei8_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_mu( @@ -1699,7 +1699,7 @@ void test_vluxseg3ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_mu( @@ -1714,7 +1714,7 @@ void test_vluxseg3ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_mu( @@ -1729,7 +1729,7 @@ void test_vluxseg3ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_mu( @@ -1744,7 +1744,7 @@ void test_vluxseg3ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_mu( @@ -1759,7 +1759,7 @@ void test_vluxseg3ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_mu( @@ -1774,7 +1774,7 @@ void test_vluxseg3ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return 
__riscv_vluxseg3ei8_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_mu( @@ -1789,7 +1789,7 @@ void test_vluxseg3ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_mu( @@ -1804,7 +1804,7 @@ void test_vluxseg3ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_mu( @@ -1819,7 +1819,7 @@ void test_vluxseg3ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_mu( @@ -1834,7 +1834,7 @@ void test_vluxseg3ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_mu( @@ -1849,7 +1849,7 @@ void test_vluxseg3ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_mu( @@ -1864,7 +1864,7 @@ void test_vluxseg3ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_mu( @@ -1879,7 +1879,7 @@ void test_vluxseg3ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t 
*v1, vint8m1_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_mu( @@ -1894,7 +1894,7 @@ void test_vluxseg3ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_mu( @@ -1909,7 +1909,7 @@ void test_vluxseg3ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_mu( @@ -1924,7 +1924,7 @@ void test_vluxseg3ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t 
*v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_mu( @@ -1939,7 +1939,7 @@ void test_vluxseg3ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_mu( @@ -1954,7 +1954,7 @@ void test_vluxseg3ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_mu( @@ -1969,7 +1969,7 @@ void test_vluxseg3ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const 
int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_mu( @@ -1984,7 +1984,7 @@ void test_vluxseg3ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_mu( @@ -1999,7 +1999,7 @@ void test_vluxseg3ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_mu( @@ -2014,7 +2014,7 @@ void test_vluxseg3ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_mu( @@ -2029,7 +2029,7 @@ void test_vluxseg3ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_mu( @@ -2044,7 +2044,7 @@ void test_vluxseg3ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_mu( @@ -2059,7 +2059,7 @@ void test_vluxseg3ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_mu( @@ -2074,7 +2074,7 @@ void test_vluxseg3ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_mu( @@ -2089,7 +2089,7 @@ void test_vluxseg3ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_mu( @@ -2104,7 +2104,7 @@ void test_vluxseg3ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_mu( @@ -2119,7 +2119,7 @@ 
void test_vluxseg3ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_mu( @@ -2134,7 +2134,7 @@ void test_vluxseg3ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_mu( @@ -2149,7 +2149,7 @@ void test_vluxseg3ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_mu( @@ -2164,7 +2164,7 @@ void test_vluxseg3ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // 
CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_mu( @@ -2179,7 +2179,7 @@ void test_vluxseg3ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_mu( @@ -2194,7 +2194,7 @@ void test_vluxseg3ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_mu( @@ -2209,7 +2209,7 @@ void test_vluxseg3ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t 
*v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_mu( @@ -2224,6 +2224,6 @@ void test_vluxseg3ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg3ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); + return __riscv_vluxseg3ei8_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei16.c index eaec03866e99..61ccb9561a4e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei16.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_tu( @@ -38,7 +38,7 @@ void test_vluxseg4ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_tu( @@ -55,7 +55,7 @@ void test_vluxseg4ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_tu( @@ -72,7 +72,7 @@ void test_vluxseg4ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vluxseg4ei16_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_tu( @@ -89,7 +89,7 @@ void test_vluxseg4ei16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_tu( @@ -106,7 +106,7 @@ void test_vluxseg4ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_tu( @@ -123,7 +123,7 @@ void test_vluxseg4ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m2_tu(v0, v1, v2, v3, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_tu( @@ -140,7 +140,7 @@ void test_vluxseg4ei16_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_tu( @@ -157,7 +157,7 @@ void test_vluxseg4ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_tu( @@ -174,7 +174,7 @@ void test_vluxseg4ei16_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - 
return vluxseg4ei16_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_tu( @@ -191,7 +191,7 @@ void test_vluxseg4ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_tu( @@ -208,7 +208,7 @@ void test_vluxseg4ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_tu( @@ -225,7 +225,7 @@ void test_vluxseg4ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { 
- return vluxseg4ei16_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_tu( @@ -242,7 +242,7 @@ void test_vluxseg4ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_tu( @@ -259,7 +259,7 @@ void test_vluxseg4ei16_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_tu( @@ -276,7 +276,7 @@ void test_vluxseg4ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, 
vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_tu( @@ -293,7 +293,7 @@ void test_vluxseg4ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_tu( @@ -310,7 +310,7 @@ void test_vluxseg4ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_tu( @@ -327,7 +327,7 @@ void test_vluxseg4ei16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t 
maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_tu( @@ -344,7 +344,7 @@ void test_vluxseg4ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_tu( @@ -361,7 +361,7 @@ void test_vluxseg4ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_tu( @@ -378,7 +378,7 @@ void test_vluxseg4ei16_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t 
maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_tu( @@ -395,7 +395,7 @@ void test_vluxseg4ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_tu( @@ -412,7 +412,7 @@ void test_vluxseg4ei16_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_tu( @@ -429,7 +429,7 @@ void test_vluxseg4ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, 
vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_tu( @@ -446,7 +446,7 @@ void test_vluxseg4ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_tu( @@ -463,7 +463,7 @@ void test_vluxseg4ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_tu( @@ -480,7 +480,7 @@ void test_vluxseg4ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, 
vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_tu( @@ -497,7 +497,7 @@ void test_vluxseg4ei16_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_tu( @@ -514,7 +514,7 @@ void test_vluxseg4ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_tu( @@ -531,7 +531,7 @@ void test_vluxseg4ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_tu( @@ -548,7 +548,7 @@ void test_vluxseg4ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_tu( @@ -565,7 +565,7 @@ void test_vluxseg4ei16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_tu( @@ -582,7 +582,7 @@ void test_vluxseg4ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t 
*v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_tu( @@ -599,7 +599,7 @@ void test_vluxseg4ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_tu( @@ -616,7 +616,7 @@ void test_vluxseg4ei16_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_tu( @@ -633,7 +633,7 @@ void 
test_vluxseg4ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_tum( @@ -650,7 +650,7 @@ void test_vluxseg4ei16_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_tum( @@ -667,7 +667,7 @@ void test_vluxseg4ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16mf2_tum(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_tum( @@ -684,7 +684,7 @@ void test_vluxseg4ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_tum( @@ -701,7 +701,7 @@ void test_vluxseg4ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_tum( @@ -718,7 +718,7 @@ void test_vluxseg4ei16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return 
vluxseg4ei16_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_tum( @@ -735,7 +735,7 @@ void test_vluxseg4ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_tum( @@ -752,7 +752,7 @@ void test_vluxseg4ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_tum( @@ -769,7 +769,7 @@ void test_vluxseg4ei16_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, 
vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_tum( @@ -786,7 +786,7 @@ void test_vluxseg4ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_tum( @@ -803,7 +803,7 @@ void test_vluxseg4ei16_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_tum( @@ -820,7 +820,7 @@ void test_vluxseg4ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // 
CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_tum( @@ -837,7 +837,7 @@ void test_vluxseg4ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_tum( @@ -854,7 +854,7 @@ void test_vluxseg4ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_tum( @@ -871,7 
+871,7 @@ void test_vluxseg4ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_tum( @@ -888,7 +888,7 @@ void test_vluxseg4ei16_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_tum( @@ -905,7 +905,7 @@ void test_vluxseg4ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16mf2_tum(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_tum( @@ -922,7 +922,7 @@ void test_vluxseg4ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_tum( @@ -939,7 +939,7 @@ void test_vluxseg4ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_tum( @@ -956,7 +956,7 @@ void test_vluxseg4ei16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i32mf2_tum(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_tum( @@ -973,7 +973,7 @@ void test_vluxseg4ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_tum( @@ -990,7 +990,7 @@ void test_vluxseg4ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_tum( @@ -1007,7 +1007,7 @@ void test_vluxseg4ei16_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t 
maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_tum( @@ -1024,7 +1024,7 @@ void test_vluxseg4ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_tum( @@ -1041,7 +1041,7 @@ void test_vluxseg4ei16_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_tum( @@ -1058,7 +1058,7 @@ void test_vluxseg4ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, 
vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_tum( @@ -1075,7 +1075,7 @@ void test_vluxseg4ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_tum( @@ -1092,7 +1092,7 @@ void test_vluxseg4ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_tum( @@ -1109,7 +1109,7 @@ void test_vluxseg4ei16_v_u8m1_tum(vuint8m1_t *v0, 
vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_tum( @@ -1126,7 +1126,7 @@ void test_vluxseg4ei16_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_tum( @@ -1143,7 +1143,7 @@ void test_vluxseg4ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_tum( @@ -1160,7 +1160,7 @@ void test_vluxseg4ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_tum( @@ -1177,7 +1177,7 @@ void test_vluxseg4ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_tum( @@ -1194,7 +1194,7 @@ void test_vluxseg4ei16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_tum( @@ -1211,7 +1211,7 @@ void test_vluxseg4ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_tum( @@ -1228,7 +1228,7 @@ void test_vluxseg4ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_tum( @@ -1245,7 +1245,7 @@ void test_vluxseg4ei16_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, 
vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_tum( @@ -1262,7 +1262,7 @@ void test_vluxseg4ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_tumu( @@ -1279,7 +1279,7 @@ void test_vluxseg4ei16_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_tumu( @@ -1296,7 +1296,7 @@ void test_vluxseg4ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_tumu( @@ -1313,7 +1313,7 @@ void test_vluxseg4ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_tumu( @@ -1330,7 +1330,7 @@ void test_vluxseg4ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_tumu( @@ -1347,7 +1347,7 @@ void test_vluxseg4ei16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_tumu( @@ -1364,7 +1364,7 @@ void test_vluxseg4ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_tumu( @@ -1381,7 +1381,7 @@ void test_vluxseg4ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_tumu( @@ -1398,7 +1398,7 @@ void test_vluxseg4ei16_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_tumu( @@ -1415,7 +1415,7 @@ void test_vluxseg4ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_tumu( @@ -1432,7 +1432,7 @@ void test_vluxseg4ei16_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, 
vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_tumu( @@ -1449,7 +1449,7 @@ void test_vluxseg4ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_tumu( @@ -1466,7 +1466,7 @@ void test_vluxseg4ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_tumu( @@ -1483,7 +1483,7 @@ void test_vluxseg4ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_tumu( @@ -1500,7 +1500,7 @@ void test_vluxseg4ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_tumu( @@ -1517,7 +1517,7 @@ void test_vluxseg4ei16_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_tumu( @@ -1534,7 +1534,7 @@ void 
test_vluxseg4ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_tumu( @@ -1551,7 +1551,7 @@ void test_vluxseg4ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_tumu( @@ -1568,7 +1568,7 @@ void test_vluxseg4ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_tumu( @@ -1585,7 +1585,7 @@ void test_vluxseg4ei16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_tumu( @@ -1602,7 +1602,7 @@ void test_vluxseg4ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_tumu( @@ -1619,7 +1619,7 @@ void test_vluxseg4ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m2_tumu(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_tumu( @@ -1636,7 +1636,7 @@ void test_vluxseg4ei16_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_tumu( @@ -1653,7 +1653,7 @@ void test_vluxseg4ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_tumu( @@ -1670,7 +1670,7 @@ void test_vluxseg4ei16_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t 
maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_tumu( @@ -1687,7 +1687,7 @@ void test_vluxseg4ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_tumu( @@ -1704,7 +1704,7 @@ void test_vluxseg4ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_tumu( @@ -1721,7 +1721,7 @@ void test_vluxseg4ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_tumu( @@ -1738,7 +1738,7 @@ void test_vluxseg4ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_tumu( @@ -1755,7 +1755,7 @@ void test_vluxseg4ei16_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei16_v_u16mf2_tumu( @@ -1772,7 +1772,7 @@ void test_vluxseg4ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_tumu( @@ -1789,7 +1789,7 @@ void test_vluxseg4ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_tumu( @@ -1806,7 +1806,7 @@ void test_vluxseg4ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); + return __riscv_vluxseg4ei16_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_tumu( @@ -1823,7 +1823,7 @@ void test_vluxseg4ei16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_tumu( @@ -1840,7 +1840,7 @@ void test_vluxseg4ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_tumu( @@ -1857,7 +1857,7 @@ void test_vluxseg4ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t 
maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_tumu( @@ -1874,7 +1874,7 @@ void test_vluxseg4ei16_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_tumu( @@ -1891,7 +1891,7 @@ void test_vluxseg4ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_mu( @@ -1908,7 +1908,7 @@ void test_vluxseg4ei16_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16mf4_mu(vfloat16mf4_t 
*v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_mu( @@ -1925,7 +1925,7 @@ void test_vluxseg4ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_mu( @@ -1942,7 +1942,7 @@ void test_vluxseg4ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_mu( @@ 
-1959,7 +1959,7 @@ void test_vluxseg4ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_mu( @@ -1976,7 +1976,7 @@ void test_vluxseg4ei16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_mu( @@ -1993,7 +1993,7 @@ void test_vluxseg4ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vluxseg4ei16_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_mu( @@ -2010,7 +2010,7 @@ void test_vluxseg4ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_mu( @@ -2027,7 +2027,7 @@ void test_vluxseg4ei16_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_mu( @@ -2044,7 +2044,7 @@ void test_vluxseg4ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t 
bindex, size_t vl) { - return vluxseg4ei16_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_mu( @@ -2061,7 +2061,7 @@ void test_vluxseg4ei16_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_mu( @@ -2078,7 +2078,7 @@ void test_vluxseg4ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_mu( @@ -2095,7 +2095,7 @@ void test_vluxseg4ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t 
maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_mu( @@ -2112,7 +2112,7 @@ void test_vluxseg4ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_mu( @@ -2129,7 +2129,7 @@ void test_vluxseg4ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_mu( @@ -2146,7 +2146,7 @@ void test_vluxseg4ei16_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16mf4_mu(vint16mf4_t 
*v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_mu( @@ -2163,7 +2163,7 @@ void test_vluxseg4ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_mu( @@ -2180,7 +2180,7 @@ void test_vluxseg4ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_mu( @@ -2197,7 +2197,7 @@ void 
test_vluxseg4ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_mu( @@ -2214,7 +2214,7 @@ void test_vluxseg4ei16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_mu( @@ -2231,7 +2231,7 @@ void test_vluxseg4ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_mu( @@ -2248,7 +2248,7 @@ void test_vluxseg4ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_mu( @@ -2265,7 +2265,7 @@ void test_vluxseg4ei16_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_mu( @@ -2282,7 +2282,7 @@ void test_vluxseg4ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bindex, vl); + return __riscv_vluxseg4ei16_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_mu( @@ -2299,7 +2299,7 @@ void test_vluxseg4ei16_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_mu( @@ -2316,7 +2316,7 @@ void test_vluxseg4ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_mu( @@ -2333,7 +2333,7 @@ void test_vluxseg4ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, 
vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_mu( @@ -2350,7 +2350,7 @@ void test_vluxseg4ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_mu( @@ -2367,7 +2367,7 @@ void test_vluxseg4ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_mu( @@ -2384,7 +2384,7 @@ void test_vluxseg4ei16_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, 
vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_mu( @@ -2401,7 +2401,7 @@ void test_vluxseg4ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_mu( @@ -2418,7 +2418,7 @@ void test_vluxseg4ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_mu( @@ -2435,7 +2435,7 @@ void test_vluxseg4ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t 
// CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_mu( @@ -2452,7 +2452,7 @@ void test_vluxseg4ei16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_mu( @@ -2469,7 +2469,7 @@ void test_vluxseg4ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_mu( @@ -2486,7 +2486,7 @@ void test_vluxseg4ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_mu( @@ -2503,7 +2503,7 @@ void test_vluxseg4ei16_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei16_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_mu( @@ -2520,6 +2520,6 @@ void test_vluxseg4ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei16_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + 
return __riscv_vluxseg4ei16_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei32.c index 5d2aa6a7568e..52a9afd80186 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei32.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_tu( @@ -38,7 +38,7 @@ void test_vluxseg4ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_tu( @@ -55,7 +55,7 @@ void test_vluxseg4ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret 
void // void test_vluxseg4ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_tu( @@ -72,7 +72,7 @@ void test_vluxseg4ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_tu( @@ -89,7 +89,7 @@ void test_vluxseg4ei32_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_tu( @@ -106,7 +106,7 @@ void 
test_vluxseg4ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_tu( @@ -123,7 +123,7 @@ void test_vluxseg4ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_tu( @@ -140,7 +140,7 @@ void test_vluxseg4ei32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_tu( @@ -157,7 +157,7 @@ void test_vluxseg4ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_tu( @@ -174,7 +174,7 @@ void test_vluxseg4ei32_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_tu( @@ -191,7 +191,7 @@ void test_vluxseg4ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_tu( @@ -208,7 +208,7 @@ void test_vluxseg4ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_tu( @@ -225,7 +225,7 @@ void test_vluxseg4ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_tu( @@ -242,7 +242,7 @@ void test_vluxseg4ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_tu( @@ -259,7 +259,7 @@ void test_vluxseg4ei32_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_tu( @@ -276,7 +276,7 @@ void test_vluxseg4ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_tu( @@ -293,7 +293,7 @@ void test_vluxseg4ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16m1_tu(v0, v1, v2, v3, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_tu( @@ -310,7 +310,7 @@ void test_vluxseg4ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_tu( @@ -327,7 +327,7 @@ void test_vluxseg4ei32_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_tu( @@ -344,7 +344,7 @@ void test_vluxseg4ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vluxseg4ei32_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_tu( @@ -361,7 +361,7 @@ void test_vluxseg4ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_tu( @@ -378,7 +378,7 @@ void test_vluxseg4ei32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_tu( @@ -395,7 +395,7 @@ void test_vluxseg4ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, 
vl); + return __riscv_vluxseg4ei32_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_tu( @@ -412,7 +412,7 @@ void test_vluxseg4ei32_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_tu( @@ -429,7 +429,7 @@ void test_vluxseg4ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_tu( @@ -446,7 +446,7 @@ void test_vluxseg4ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_tu( @@ -463,7 +463,7 @@ void test_vluxseg4ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_tu( @@ -480,7 +480,7 @@ void test_vluxseg4ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_tu( @@ -497,7 +497,7 @@ void test_vluxseg4ei32_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf4_tu(v0, v1, v2, 
v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_tu( @@ -514,7 +514,7 @@ void test_vluxseg4ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_tu( @@ -531,7 +531,7 @@ void test_vluxseg4ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_tu( @@ -548,7 +548,7 @@ void test_vluxseg4ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, 
size_t vl) { - return vluxseg4ei32_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_tu( @@ -565,7 +565,7 @@ void test_vluxseg4ei32_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_tu( @@ -582,7 +582,7 @@ void test_vluxseg4ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_tu( @@ -599,7 +599,7 @@ void test_vluxseg4ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t 
maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_tu( @@ -616,7 +616,7 @@ void test_vluxseg4ei32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_tu( @@ -633,7 +633,7 @@ void test_vluxseg4ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_tum( @@ -650,7 +650,7 @@ void test_vluxseg4ei32_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t 
maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_tum( @@ -667,7 +667,7 @@ void test_vluxseg4ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_tum( @@ -684,7 +684,7 @@ void test_vluxseg4ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_tum( @@ -701,7 +701,7 @@ void test_vluxseg4ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, 
vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_tum( @@ -718,7 +718,7 @@ void test_vluxseg4ei32_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_tum( @@ -735,7 +735,7 @@ void test_vluxseg4ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_tum( @@ -752,7 +752,7 @@ void test_vluxseg4ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_tum( @@ -769,7 +769,7 @@ void test_vluxseg4ei32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_tum( @@ -786,7 +786,7 @@ void test_vluxseg4ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_tum( @@ -803,7 +803,7 @@ void test_vluxseg4ei32_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_tum( @@ -820,7 +820,7 @@ void test_vluxseg4ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_tum( @@ -837,7 +837,7 @@ void test_vluxseg4ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t 
*base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_tum( @@ -854,7 +854,7 @@ void test_vluxseg4ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_tum( @@ -871,7 +871,7 @@ void test_vluxseg4ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_tum( @@ -888,7 +888,7 @@ void test_vluxseg4ei32_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t 
maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_tum( @@ -905,7 +905,7 @@ void test_vluxseg4ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_tum( @@ -922,7 +922,7 @@ void test_vluxseg4ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_tum( @@ -939,7 +939,7 @@ void test_vluxseg4ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void 
// void test_vluxseg4ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_tum( @@ -956,7 +956,7 @@ void test_vluxseg4ei32_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_tum( @@ -973,7 +973,7 @@ void test_vluxseg4ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_tum( @@ 
-990,7 +990,7 @@ void test_vluxseg4ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_tum( @@ -1007,7 +1007,7 @@ void test_vluxseg4ei32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_tum( @@ -1024,7 +1024,7 @@ void test_vluxseg4ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i64m2_tum(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_tum( @@ -1041,7 +1041,7 @@ void test_vluxseg4ei32_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_tum( @@ -1058,7 +1058,7 @@ void test_vluxseg4ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_tum( @@ -1075,7 +1075,7 @@ void test_vluxseg4ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf2_tum(v0, v1, v2, 
v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_tum( @@ -1092,7 +1092,7 @@ void test_vluxseg4ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_tum( @@ -1109,7 +1109,7 @@ void test_vluxseg4ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_tum( @@ -1126,7 +1126,7 @@ void test_vluxseg4ei32_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, 
vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_tum( @@ -1143,7 +1143,7 @@ void test_vluxseg4ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_tum( @@ -1160,7 +1160,7 @@ void test_vluxseg4ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_tum( @@ -1177,7 +1177,7 @@ void test_vluxseg4ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_tum( @@ -1194,7 +1194,7 @@ void test_vluxseg4ei32_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_tum( @@ -1211,7 +1211,7 @@ void test_vluxseg4ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei32_v_u32m2_tum( @@ -1228,7 +1228,7 @@ void test_vluxseg4ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_tum( @@ -1245,7 +1245,7 @@ void test_vluxseg4ei32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_tum( @@ -1262,7 +1262,7 @@ void test_vluxseg4ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vluxseg4ei32_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_tumu( @@ -1279,7 +1279,7 @@ void test_vluxseg4ei32_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_tumu( @@ -1296,7 +1296,7 @@ void test_vluxseg4ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_tumu( @@ -1313,7 +1313,7 @@ void test_vluxseg4ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, 
vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_tumu( @@ -1330,7 +1330,7 @@ void test_vluxseg4ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_tumu( @@ -1347,7 +1347,7 @@ void test_vluxseg4ei32_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_tumu( @@ -1364,7 +1364,7 @@ void test_vluxseg4ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_tumu( @@ -1381,7 +1381,7 @@ void test_vluxseg4ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_tumu( @@ -1398,7 +1398,7 @@ void test_vluxseg4ei32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei32_v_f64m2_tumu( @@ -1415,7 +1415,7 @@ void test_vluxseg4ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_tumu( @@ -1432,7 +1432,7 @@ void test_vluxseg4ei32_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_tumu( @@ -1449,7 +1449,7 @@ void test_vluxseg4ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vluxseg4ei32_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_tumu( @@ -1466,7 +1466,7 @@ void test_vluxseg4ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_tumu( @@ -1483,7 +1483,7 @@ void test_vluxseg4ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_tumu( @@ -1500,7 +1500,7 @@ void test_vluxseg4ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return 
vluxseg4ei32_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_tumu( @@ -1517,7 +1517,7 @@ void test_vluxseg4ei32_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_tumu( @@ -1534,7 +1534,7 @@ void test_vluxseg4ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_tumu( @@ -1551,7 +1551,7 @@ void test_vluxseg4ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, 
vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_tumu( @@ -1568,7 +1568,7 @@ void test_vluxseg4ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_tumu( @@ -1585,7 +1585,7 @@ void test_vluxseg4ei32_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_tumu( @@ -1602,7 +1602,7 @@ void test_vluxseg4ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // 
CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_tumu( @@ -1619,7 +1619,7 @@ void test_vluxseg4ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_tumu( @@ -1636,7 +1636,7 @@ void test_vluxseg4ei32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei32_v_i64m2_tumu( @@ -1653,7 +1653,7 @@ void test_vluxseg4ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_tumu( @@ -1670,7 +1670,7 @@ void test_vluxseg4ei32_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_tumu( @@ -1687,7 +1687,7 @@ void test_vluxseg4ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vluxseg4ei32_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_tumu( @@ -1704,7 +1704,7 @@ void test_vluxseg4ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_tumu( @@ -1721,7 +1721,7 @@ void test_vluxseg4ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_tumu( @@ -1738,7 +1738,7 @@ void test_vluxseg4ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - 
return vluxseg4ei32_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_tumu( @@ -1755,7 +1755,7 @@ void test_vluxseg4ei32_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_tumu( @@ -1772,7 +1772,7 @@ void test_vluxseg4ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_tumu( @@ -1789,7 +1789,7 @@ void test_vluxseg4ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, 
vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_tumu( @@ -1806,7 +1806,7 @@ void test_vluxseg4ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_tumu( @@ -1823,7 +1823,7 @@ void test_vluxseg4ei32_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_tumu( @@ -1840,7 +1840,7 @@ void 
test_vluxseg4ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_tumu( @@ -1857,7 +1857,7 @@ void test_vluxseg4ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_tumu( @@ -1874,7 +1874,7 @@ void test_vluxseg4ei32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u64m1_tumu(v0, v1, v2, v3, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_tumu( @@ -1891,7 +1891,7 @@ void test_vluxseg4ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_mu( @@ -1908,7 +1908,7 @@ void test_vluxseg4ei32_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_mu( @@ -1925,7 +1925,7 @@ void test_vluxseg4ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - 
return vluxseg4ei32_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_mu( @@ -1942,7 +1942,7 @@ void test_vluxseg4ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_mu( @@ -1959,7 +1959,7 @@ void test_vluxseg4ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_mu( @@ -1976,7 +1976,7 @@ void test_vluxseg4ei32_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t 
mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_mu( @@ -1993,7 +1993,7 @@ void test_vluxseg4ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_mu( @@ -2010,7 +2010,7 @@ void test_vluxseg4ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_mu( @@ -2027,7 +2027,7 @@ void test_vluxseg4ei32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, 
vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_mu( @@ -2044,7 +2044,7 @@ void test_vluxseg4ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_mu( @@ -2061,7 +2061,7 @@ void test_vluxseg4ei32_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_mu( @@ -2078,7 +2078,7 @@ void test_vluxseg4ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_mu( @@ -2095,7 +2095,7 @@ void test_vluxseg4ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_mu( @@ -2112,7 +2112,7 @@ void test_vluxseg4ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8m1_mu(v0, 
v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_mu( @@ -2129,7 +2129,7 @@ void test_vluxseg4ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_mu( @@ -2146,7 +2146,7 @@ void test_vluxseg4ei32_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_mu( @@ -2163,7 +2163,7 @@ void test_vluxseg4ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf2_mu(v0, v1, v2, v3, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_mu( @@ -2180,7 +2180,7 @@ void test_vluxseg4ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_mu( @@ -2197,7 +2197,7 @@ void test_vluxseg4ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_mu( @@ -2214,7 +2214,7 @@ void test_vluxseg4ei32_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t 
maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_mu( @@ -2231,7 +2231,7 @@ void test_vluxseg4ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_mu( @@ -2248,7 +2248,7 @@ void test_vluxseg4ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_mu( @@ -2265,7 +2265,7 @@ void test_vluxseg4ei32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, 
vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_mu( @@ -2282,7 +2282,7 @@ void test_vluxseg4ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_mu( @@ -2299,7 +2299,7 @@ void test_vluxseg4ei32_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_mu( @@ -2316,7 +2316,7 @@ void test_vluxseg4ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t 
// CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_mu( @@ -2333,7 +2333,7 @@ void test_vluxseg4ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_mu( @@ -2350,7 +2350,7 @@ void test_vluxseg4ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei32_v_u8m2_mu( @@ -2367,7 +2367,7 @@ void test_vluxseg4ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_mu( @@ -2384,7 +2384,7 @@ void test_vluxseg4ei32_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_mu( @@ -2401,7 +2401,7 @@ void test_vluxseg4ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vluxseg4ei32_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_mu( @@ -2418,7 +2418,7 @@ void test_vluxseg4ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_mu( @@ -2435,7 +2435,7 @@ void test_vluxseg4ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_mu( @@ -2452,7 +2452,7 @@ void test_vluxseg4ei32_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, 
size_t vl) { - return vluxseg4ei32_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_mu( @@ -2469,7 +2469,7 @@ void test_vluxseg4ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_mu( @@ -2486,7 +2486,7 @@ void test_vluxseg4ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_mu( @@ -2503,7 +2503,7 @@ void test_vluxseg4ei32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, 
vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_mu( @@ -2520,6 +2520,6 @@ void test_vluxseg4ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei32_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei32_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei64.c index d591e3c0f64b..678a23b43ab4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei64.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vluxseg4ei64_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_tu( @@ -38,7 +38,7 @@ void test_vluxseg4ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_tu( @@ -55,7 +55,7 @@ void test_vluxseg4ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_tu( @@ -72,7 +72,7 @@ void test_vluxseg4ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m2_tu(v0, v1, v2, v3, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_tu( @@ -89,7 +89,7 @@ void test_vluxseg4ei64_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_tu( @@ -106,7 +106,7 @@ void test_vluxseg4ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_tu( @@ -123,7 +123,7 @@ void test_vluxseg4ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t 
bindex, size_t vl) { - return vluxseg4ei64_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_tu( @@ -140,7 +140,7 @@ void test_vluxseg4ei64_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_tu( @@ -157,7 +157,7 @@ void test_vluxseg4ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_tu( @@ -174,7 +174,7 @@ void test_vluxseg4ei64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t 
maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_tu( @@ -191,7 +191,7 @@ void test_vluxseg4ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_tu( @@ -208,7 +208,7 @@ void test_vluxseg4ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_tu( @@ -225,7 +225,7 @@ void test_vluxseg4ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t 
maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_tu( @@ -242,7 +242,7 @@ void test_vluxseg4ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_tu( @@ -259,7 +259,7 @@ void test_vluxseg4ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_tu( @@ -276,7 +276,7 @@ void test_vluxseg4ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, 
vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_tu( @@ -293,7 +293,7 @@ void test_vluxseg4ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_tu( @@ -310,7 +310,7 @@ void test_vluxseg4ei64_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_tu( @@ -327,7 +327,7 @@ void test_vluxseg4ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t 
maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_tu( @@ -344,7 +344,7 @@ void test_vluxseg4ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_tu( @@ -361,7 +361,7 @@ void test_vluxseg4ei64_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_tu( @@ -378,7 +378,7 @@ void test_vluxseg4ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t 
*v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_tu( @@ -395,7 +395,7 @@ void test_vluxseg4ei64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_tu( @@ -412,7 +412,7 @@ void test_vluxseg4ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_tu( @@ -429,7 +429,7 @@ void test_vluxseg4ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t 
*v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_tu( @@ -446,7 +446,7 @@ void test_vluxseg4ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_tu( @@ -463,7 +463,7 @@ void test_vluxseg4ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_tu( @@ -480,7 +480,7 @@ void test_vluxseg4ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_tu( @@ -497,7 +497,7 @@ void test_vluxseg4ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_tu( @@ -514,7 +514,7 @@ void test_vluxseg4ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_tu( @@ -531,7 +531,7 @@ void test_vluxseg4ei64_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t 
*v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_tu( @@ -548,7 +548,7 @@ void test_vluxseg4ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_tu( @@ -565,7 +565,7 @@ void test_vluxseg4ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_tu( @@ -582,7 +582,7 @@ void 
test_vluxseg4ei64_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_tu( @@ -599,7 +599,7 @@ void test_vluxseg4ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_tum( @@ -616,7 +616,7 @@ void test_vluxseg4ei64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_tum( @@ -633,7 +633,7 @@ void test_vluxseg4ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_tum( @@ -650,7 +650,7 @@ void test_vluxseg4ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_tum( @@ -667,7 +667,7 @@ void test_vluxseg4ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_tum( @@ -684,7 +684,7 @@ void test_vluxseg4ei64_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_tum( @@ -701,7 +701,7 @@ void test_vluxseg4ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_tum( @@ -718,7 +718,7 @@ void test_vluxseg4ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, 
vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_tum( @@ -735,7 +735,7 @@ void test_vluxseg4ei64_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_tum( @@ -752,7 +752,7 @@ void test_vluxseg4ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_tum( @@ -769,7 +769,7 @@ void test_vluxseg4ei64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_tum( @@ -786,7 +786,7 @@ void test_vluxseg4ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_tum( @@ -803,7 +803,7 @@ void test_vluxseg4ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_tum( @@ -820,7 +820,7 @@ void 
test_vluxseg4ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_tum( @@ -837,7 +837,7 @@ void test_vluxseg4ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_tum( @@ -854,7 +854,7 @@ void test_vluxseg4ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_tum( @@ -871,7 +871,7 @@ void test_vluxseg4ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_tum( @@ -888,7 +888,7 @@ void test_vluxseg4ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_tum( @@ -905,7 +905,7 @@ void test_vluxseg4ei64_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_tum( @@ -922,7 +922,7 @@ void test_vluxseg4ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_tum( @@ -939,7 +939,7 @@ void test_vluxseg4ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_tum( @@ -956,7 +956,7 @@ void test_vluxseg4ei64_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t 
bindex, size_t vl) { - return vluxseg4ei64_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_tum( @@ -973,7 +973,7 @@ void test_vluxseg4ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_tum( @@ -990,7 +990,7 @@ void test_vluxseg4ei64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_tum( @@ -1007,7 +1007,7 @@ void test_vluxseg4ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, 
vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_tum( @@ -1024,7 +1024,7 @@ void test_vluxseg4ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_tum( @@ -1041,7 +1041,7 @@ void test_vluxseg4ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_tum( @@ -1058,7 +1058,7 @@ void test_vluxseg4ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret 
void // void test_vluxseg4ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_tum( @@ -1075,7 +1075,7 @@ void test_vluxseg4ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_tum( @@ -1092,7 +1092,7 @@ void test_vluxseg4ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_tum( @@ -1109,7 +1109,7 @@ void test_vluxseg4ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_tum( @@ -1126,7 +1126,7 @@ void test_vluxseg4ei64_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_tum( @@ -1143,7 +1143,7 @@ void test_vluxseg4ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_tum( @@ -1160,7 +1160,7 @@ void test_vluxseg4ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_tum( @@ -1177,7 +1177,7 @@ void test_vluxseg4ei64_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_tum( @@ -1194,7 +1194,7 @@ void test_vluxseg4ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t 
*base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_tumu( @@ -1211,7 +1211,7 @@ void test_vluxseg4ei64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_tumu( @@ -1228,7 +1228,7 @@ void test_vluxseg4ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_tumu( @@ -1245,7 +1245,7 @@ void test_vluxseg4ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_tumu( @@ -1262,7 +1262,7 @@ void test_vluxseg4ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_tumu( @@ -1279,7 +1279,7 @@ void test_vluxseg4ei64_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_tumu( @@ -1296,7 +1296,7 @@ void test_vluxseg4ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_tumu( @@ -1313,7 +1313,7 @@ void test_vluxseg4ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_tumu( @@ -1330,7 +1330,7 @@ void test_vluxseg4ei64_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_tumu( @@ -1347,7 +1347,7 @@ void test_vluxseg4ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_tumu( @@ -1364,7 +1364,7 @@ void test_vluxseg4ei64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_tumu( @@ -1381,7 +1381,7 @@ void test_vluxseg4ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t 
maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_tumu( @@ -1398,7 +1398,7 @@ void test_vluxseg4ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_tumu( @@ -1415,7 +1415,7 @@ void test_vluxseg4ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_tumu( @@ -1432,7 +1432,7 @@ void test_vluxseg4ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t 
*v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_tumu( @@ -1449,7 +1449,7 @@ void test_vluxseg4ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_tumu( @@ -1466,7 +1466,7 @@ void test_vluxseg4ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_tumu( @@ -1483,7 +1483,7 @@ void 
test_vluxseg4ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_tumu( @@ -1500,7 +1500,7 @@ void test_vluxseg4ei64_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_tumu( @@ -1517,7 +1517,7 @@ void test_vluxseg4ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_tumu( @@ -1534,7 +1534,7 @@ void test_vluxseg4ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_tumu( @@ -1551,7 +1551,7 @@ void test_vluxseg4ei64_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_tumu( @@ -1568,7 +1568,7 @@ void test_vluxseg4ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_tumu( @@ -1585,7 +1585,7 @@ void test_vluxseg4ei64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_tumu( @@ -1602,7 +1602,7 @@ void test_vluxseg4ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_tumu( @@ -1619,7 +1619,7 @@ void test_vluxseg4ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t 
maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_tumu( @@ -1636,7 +1636,7 @@ void test_vluxseg4ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_tumu( @@ -1653,7 +1653,7 @@ void test_vluxseg4ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_tumu( @@ -1670,7 +1670,7 @@ void test_vluxseg4ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_tumu( @@ -1687,7 +1687,7 @@ void test_vluxseg4ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_tumu( @@ -1704,7 +1704,7 @@ void test_vluxseg4ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei64_v_u32mf2_tumu( @@ -1721,7 +1721,7 @@ void test_vluxseg4ei64_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_tumu( @@ -1738,7 +1738,7 @@ void test_vluxseg4ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_tumu( @@ -1755,7 +1755,7 @@ void test_vluxseg4ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); + return __riscv_vluxseg4ei64_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_tumu( @@ -1772,7 +1772,7 @@ void test_vluxseg4ei64_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_tumu( @@ -1789,7 +1789,7 @@ void test_vluxseg4ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_mu( @@ -1806,7 +1806,7 @@ void test_vluxseg4ei64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t 
maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_mu( @@ -1823,7 +1823,7 @@ void test_vluxseg4ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_mu( @@ -1840,7 +1840,7 @@ void test_vluxseg4ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_mu( @@ -1857,7 +1857,7 @@ void test_vluxseg4ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_mu( @@ -1874,7 +1874,7 @@ void test_vluxseg4ei64_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_mu( @@ -1891,7 +1891,7 @@ void test_vluxseg4ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei64_v_f32m2_mu( @@ -1908,7 +1908,7 @@ void test_vluxseg4ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_mu( @@ -1925,7 +1925,7 @@ void test_vluxseg4ei64_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_mu( @@ -1942,7 +1942,7 @@ void test_vluxseg4ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + 
return __riscv_vluxseg4ei64_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_mu( @@ -1959,7 +1959,7 @@ void test_vluxseg4ei64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_mu( @@ -1976,7 +1976,7 @@ void test_vluxseg4ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_mu( @@ -1993,7 +1993,7 @@ void test_vluxseg4ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return 
vluxseg4ei64_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_mu( @@ -2010,7 +2010,7 @@ void test_vluxseg4ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_mu( @@ -2027,7 +2027,7 @@ void test_vluxseg4ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_mu( @@ -2044,7 +2044,7 @@ void test_vluxseg4ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, 
vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_mu( @@ -2061,7 +2061,7 @@ void test_vluxseg4ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_mu( @@ -2078,7 +2078,7 @@ void test_vluxseg4ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_mu( @@ -2095,7 +2095,7 @@ void test_vluxseg4ei64_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32mf2_mu(vint32mf2_t *v0, 
vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_mu( @@ -2112,7 +2112,7 @@ void test_vluxseg4ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_mu( @@ -2129,7 +2129,7 @@ void test_vluxseg4ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_mu( @@ -2146,7 +2146,7 @@ void test_vluxseg4ei64_v_i32m2_mu(vint32m2_t *v0, 
vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_mu( @@ -2163,7 +2163,7 @@ void test_vluxseg4ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_mu( @@ -2180,7 +2180,7 @@ void test_vluxseg4ei64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_mu( @@ -2197,7 +2197,7 @@ void test_vluxseg4ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_mu( @@ -2214,7 +2214,7 @@ void test_vluxseg4ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_mu( @@ -2231,7 +2231,7 @@ void test_vluxseg4ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vluxseg4ei64_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_mu( @@ -2248,7 +2248,7 @@ void test_vluxseg4ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_mu( @@ -2265,7 +2265,7 @@ void test_vluxseg4ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_mu( @@ -2282,7 +2282,7 @@ void test_vluxseg4ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, 
vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_mu( @@ -2299,7 +2299,7 @@ void test_vluxseg4ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_mu( @@ -2316,7 +2316,7 @@ void test_vluxseg4ei64_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_mu( @@ -2333,7 +2333,7 @@ void test_vluxseg4ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t 
*v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_mu( @@ -2350,7 +2350,7 @@ void test_vluxseg4ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_mu( @@ -2367,7 +2367,7 @@ void test_vluxseg4ei64_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_mu( @@ -2384,6 +2384,6 @@ void test_vluxseg4ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t 
// CHECK-RV64-NEXT: ret void // void test_vluxseg4ei64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei64_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei8.c index 106e928ef8f7..0b5f0d81fcd6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg4ei8.c @@ -21,7 +21,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_tu( @@ -38,7 +38,7 @@ void test_vluxseg4ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - 
return vluxseg4ei8_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_tu( @@ -55,7 +55,7 @@ void test_vluxseg4ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_tu( @@ -72,7 +72,7 @@ void test_vluxseg4ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_tu( @@ -89,7 +89,7 @@ void test_vluxseg4ei8_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, 
const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_tu( @@ -106,7 +106,7 @@ void test_vluxseg4ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_tu( @@ -123,7 +123,7 @@ void test_vluxseg4ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_tu( @@ -140,7 +140,7 @@ void test_vluxseg4ei8_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, 
vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_tu( @@ -157,7 +157,7 @@ void test_vluxseg4ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_tu( @@ -174,7 +174,7 @@ void test_vluxseg4ei8_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_tu( @@ -191,7 +191,7 @@ void test_vluxseg4ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, 
vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_tu( @@ -208,7 +208,7 @@ void test_vluxseg4ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_tu( @@ -225,7 +225,7 @@ void test_vluxseg4ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_tu( @@ -242,7 +242,7 @@ void test_vluxseg4ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, 
vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_tu( @@ -259,7 +259,7 @@ void test_vluxseg4ei8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_tu( @@ -276,7 +276,7 @@ void test_vluxseg4ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_tu( @@ -293,7 +293,7 @@ void test_vluxseg4ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, 
vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_tu( @@ -310,7 +310,7 @@ void test_vluxseg4ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_tu( @@ -327,7 +327,7 @@ void test_vluxseg4ei8_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_tu( @@ -344,7 +344,7 @@ void test_vluxseg4ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t 
maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_tu( @@ -361,7 +361,7 @@ void test_vluxseg4ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_tu( @@ -378,7 +378,7 @@ void test_vluxseg4ei8_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_tu( @@ -395,7 +395,7 @@ void test_vluxseg4ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, 
vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_tu( @@ -412,7 +412,7 @@ void test_vluxseg4ei8_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_tu( @@ -429,7 +429,7 @@ void test_vluxseg4ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_tu( @@ -446,7 +446,7 @@ void test_vluxseg4ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t 
*v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_tu( @@ -463,7 +463,7 @@ void test_vluxseg4ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_tu( @@ -480,7 +480,7 @@ void test_vluxseg4ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_tu( @@ -497,7 +497,7 @@ void test_vluxseg4ei8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, 
vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_tu( @@ -514,7 +514,7 @@ void test_vluxseg4ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_tu( @@ -531,7 +531,7 @@ void test_vluxseg4ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_tu( @@ -548,7 +548,7 @@ void test_vluxseg4ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_tu( @@ -565,7 +565,7 @@ void test_vluxseg4ei8_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_tu( @@ -582,7 +582,7 @@ void test_vluxseg4ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_tu( @@ -599,7 +599,7 @@ void test_vluxseg4ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t 
* // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_tu( @@ -616,7 +616,7 @@ void test_vluxseg4ei8_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_tu( @@ -633,7 +633,7 @@ void test_vluxseg4ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_tum( @@ -650,7 +650,7 @@ void test_vluxseg4ei8_v_u64m2_tu(vuint64m2_t 
*v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_tum( @@ -667,7 +667,7 @@ void test_vluxseg4ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_tum( @@ -684,7 +684,7 @@ void test_vluxseg4ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_tum( @@ -701,7 +701,7 @@ void test_vluxseg4ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_tum( @@ -718,7 +718,7 @@ void test_vluxseg4ei8_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_tum( @@ -735,7 +735,7 @@ void test_vluxseg4ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m1_tum(v0, v1, v2, 
v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_tum( @@ -752,7 +752,7 @@ void test_vluxseg4ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_tum( @@ -769,7 +769,7 @@ void test_vluxseg4ei8_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_tum( @@ -786,7 +786,7 @@ void test_vluxseg4ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, 
vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_tum( @@ -803,7 +803,7 @@ void test_vluxseg4ei8_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_tum( @@ -820,7 +820,7 @@ void test_vluxseg4ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_tum( @@ -837,7 +837,7 @@ void test_vluxseg4ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t 
*v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_tum( @@ -854,7 +854,7 @@ void test_vluxseg4ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_tum( @@ -871,7 +871,7 @@ void test_vluxseg4ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_tum( @@ -888,7 +888,7 @@ void test_vluxseg4ei8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vi // CHECK-RV64-NEXT: ret 
void // void test_vluxseg4ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_tum( @@ -905,7 +905,7 @@ void test_vluxseg4ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_tum( @@ -922,7 +922,7 @@ void test_vluxseg4ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_tum( @@ 
-939,7 +939,7 @@ void test_vluxseg4ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_tum( @@ -956,7 +956,7 @@ void test_vluxseg4ei8_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_tum( @@ -973,7 +973,7 @@ void test_vluxseg4ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_tum( @@ -990,7 +990,7 @@ void test_vluxseg4ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_tum( @@ -1007,7 +1007,7 @@ void test_vluxseg4ei8_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_tum( @@ -1024,7 +1024,7 @@ void test_vluxseg4ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_tum( @@ -1041,7 +1041,7 @@ void test_vluxseg4ei8_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_tum( @@ -1058,7 +1058,7 @@ void test_vluxseg4ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_tum( @@ -1075,7 +1075,7 @@ void test_vluxseg4ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t 
*base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_tum( @@ -1092,7 +1092,7 @@ void test_vluxseg4ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_tum( @@ -1109,7 +1109,7 @@ void test_vluxseg4ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_tum( @@ -1126,7 +1126,7 @@ void test_vluxseg4ei8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, 
vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_tum( @@ -1143,7 +1143,7 @@ void test_vluxseg4ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_tum( @@ -1160,7 +1160,7 @@ void test_vluxseg4ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_tum( @@ -1177,7 +1177,7 @@ void test_vluxseg4ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_tum( @@ -1194,7 +1194,7 @@ void test_vluxseg4ei8_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_tum( @@ -1211,7 +1211,7 @@ void test_vluxseg4ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_tum( @@ -1228,7 +1228,7 @@ void test_vluxseg4ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_tum( @@ -1245,7 +1245,7 @@ void test_vluxseg4ei8_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_tum( @@ -1262,7 +1262,7 @@ void test_vluxseg4ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + 
return __riscv_vluxseg4ei8_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_tumu( @@ -1279,7 +1279,7 @@ void test_vluxseg4ei8_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_tumu( @@ -1296,7 +1296,7 @@ void test_vluxseg4ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_tumu( @@ -1313,7 +1313,7 @@ void test_vluxseg4ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t 
maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_tumu( @@ -1330,7 +1330,7 @@ void test_vluxseg4ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_tumu( @@ -1347,7 +1347,7 @@ void test_vluxseg4ei8_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_tumu( @@ -1364,7 +1364,7 @@ void test_vluxseg4ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_tumu( @@ -1381,7 +1381,7 @@ void test_vluxseg4ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_tumu( @@ -1398,7 +1398,7 @@ void test_vluxseg4ei8_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei8_v_f64m2_tumu( @@ -1415,7 +1415,7 @@ void test_vluxseg4ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_tumu( @@ -1432,7 +1432,7 @@ void test_vluxseg4ei8_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_tumu( @@ -1449,7 +1449,7 @@ void test_vluxseg4ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return 
__riscv_vluxseg4ei8_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_tumu( @@ -1466,7 +1466,7 @@ void test_vluxseg4ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_tumu( @@ -1483,7 +1483,7 @@ void test_vluxseg4ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_tumu( @@ -1500,7 +1500,7 @@ void test_vluxseg4ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m2_tumu(v0, v1, v2, 
v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_tumu( @@ -1517,7 +1517,7 @@ void test_vluxseg4ei8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_tumu( @@ -1534,7 +1534,7 @@ void test_vluxseg4ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_tumu( @@ -1551,7 +1551,7 @@ void test_vluxseg4ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, 
vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_tumu( @@ -1568,7 +1568,7 @@ void test_vluxseg4ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_tumu( @@ -1585,7 +1585,7 @@ void test_vluxseg4ei8_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_tumu( @@ -1602,7 +1602,7 @@ void test_vluxseg4ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_tumu( @@ -1619,7 +1619,7 @@ void test_vluxseg4ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_tumu( @@ -1636,7 +1636,7 @@ void test_vluxseg4ei8_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_tumu( @@ -1653,7 +1653,7 @@ 
void test_vluxseg4ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_tumu( @@ -1670,7 +1670,7 @@ void test_vluxseg4ei8_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_tumu( @@ -1687,7 +1687,7 @@ void test_vluxseg4ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_tumu( @@ -1704,7 +1704,7 @@ void test_vluxseg4ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_tumu( @@ -1721,7 +1721,7 @@ void test_vluxseg4ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_tumu( @@ -1738,7 +1738,7 @@ void test_vluxseg4ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_tumu( @@ -1755,7 +1755,7 @@ void test_vluxseg4ei8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_tumu( @@ -1772,7 +1772,7 @@ void test_vluxseg4ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_tumu( @@ -1789,7 +1789,7 @@ void test_vluxseg4ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t 
maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_tumu( @@ -1806,7 +1806,7 @@ void test_vluxseg4ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_tumu( @@ -1823,7 +1823,7 @@ void test_vluxseg4ei8_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_tumu( @@ -1840,7 +1840,7 @@ void test_vluxseg4ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void 
test_vluxseg4ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_tumu( @@ -1857,7 +1857,7 @@ void test_vluxseg4ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_tumu( @@ -1874,7 +1874,7 @@ void test_vluxseg4ei8_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg4ei8_v_u64m2_tumu( @@ -1891,7 +1891,7 @@ void test_vluxseg4ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_mu( @@ -1908,7 +1908,7 @@ void test_vluxseg4ei8_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_mu( @@ -1925,7 +1925,7 @@ void test_vluxseg4ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); + return __riscv_vluxseg4ei8_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_mu( @@ -1942,7 +1942,7 @@ void test_vluxseg4ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_mu( @@ -1959,7 +1959,7 @@ void test_vluxseg4ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_mu( @@ -1976,7 +1976,7 @@ void test_vluxseg4ei8_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float 
*base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_mu( @@ -1993,7 +1993,7 @@ void test_vluxseg4ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_mu( @@ -2010,7 +2010,7 @@ void test_vluxseg4ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_mu( @@ -2027,7 +2027,7 @@ void test_vluxseg4ei8_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, 
vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_mu( @@ -2044,7 +2044,7 @@ void test_vluxseg4ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_mu( @@ -2061,7 +2061,7 @@ void test_vluxseg4ei8_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_mu( @@ -2078,7 +2078,7 @@ void test_vluxseg4ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t 
*v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_mu( @@ -2095,7 +2095,7 @@ void test_vluxseg4ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_mu( @@ -2112,7 +2112,7 @@ void test_vluxseg4ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_mu( @@ -2129,7 +2129,7 @@ 
void test_vluxseg4ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_mu( @@ -2146,7 +2146,7 @@ void test_vluxseg4ei8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_mu( @@ -2163,7 +2163,7 @@ void test_vluxseg4ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_mu( @@ -2180,7 +2180,7 @@ void test_vluxseg4ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_mu( @@ -2197,7 +2197,7 @@ void test_vluxseg4ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_mu( @@ -2214,7 +2214,7 @@ void test_vluxseg4ei8_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, 
vl); + return __riscv_vluxseg4ei8_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_mu( @@ -2231,7 +2231,7 @@ void test_vluxseg4ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_mu( @@ -2248,7 +2248,7 @@ void test_vluxseg4ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_mu( @@ -2265,7 +2265,7 @@ void test_vluxseg4ei8_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return 
vluxseg4ei8_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_mu( @@ -2282,7 +2282,7 @@ void test_vluxseg4ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_mu( @@ -2299,7 +2299,7 @@ void test_vluxseg4ei8_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_mu( @@ -2316,7 +2316,7 @@ void test_vluxseg4ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, 
vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_mu( @@ -2333,7 +2333,7 @@ void test_vluxseg4ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_mu( @@ -2350,7 +2350,7 @@ void test_vluxseg4ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_mu( @@ -2367,7 +2367,7 @@ void test_vluxseg4ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, 
vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_mu( @@ -2384,7 +2384,7 @@ void test_vluxseg4ei8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_mu( @@ -2401,7 +2401,7 @@ void test_vluxseg4ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_mu( @@ -2418,7 +2418,7 @@ void test_vluxseg4ei8_v_u16mf2_mu(vuint16mf2_t 
*v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_mu( @@ -2435,7 +2435,7 @@ void test_vluxseg4ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_mu( @@ -2452,7 +2452,7 @@ void test_vluxseg4ei8_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_mu( @@ -2469,7 +2469,7 @@ void test_vluxseg4ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_mu( @@ -2486,7 +2486,7 @@ void test_vluxseg4ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_mu( @@ -2503,7 +2503,7 @@ void test_vluxseg4ei8_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, 
vl); + return __riscv_vluxseg4ei8_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_mu( @@ -2520,6 +2520,6 @@ void test_vluxseg4ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg4ei8_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); + return __riscv_vluxseg4ei8_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei16.c index d68c645e9f05..c48bc62455f1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei16.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_tu( @@ -42,7 +42,7 @@ void 
test_vluxseg5ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_tu( @@ -61,7 +61,7 @@ void test_vluxseg5ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_tu( @@ -80,7 +80,7 @@ void test_vluxseg5ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f32mf2_tu(v0, v1, 
v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_tu( @@ -99,7 +99,7 @@ void test_vluxseg5ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_tu( @@ -118,7 +118,7 @@ void test_vluxseg5ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_tu( @@ -137,7 +137,7 @@ void test_vluxseg5ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf8_tu(vint8mf8_t *v0, 
vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_tu( @@ -156,7 +156,7 @@ void test_vluxseg5ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_tu( @@ -175,7 +175,7 @@ void test_vluxseg5ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_tu( @@ -194,7 +194,7 @@ void test_vluxseg5ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_tu( @@ -213,7 +213,7 @@ void test_vluxseg5ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_tu( @@ -232,7 +232,7 @@ void test_vluxseg5ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf2_tu(v0, 
v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_tu( @@ -251,7 +251,7 @@ void test_vluxseg5ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_tu( @@ -270,7 +270,7 @@ void test_vluxseg5ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_tu( @@ -289,7 +289,7 @@ void test_vluxseg5ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t 
*v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_tu( @@ -308,7 +308,7 @@ void test_vluxseg5ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_tu( @@ -327,7 +327,7 @@ void test_vluxseg5ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg5ei16_v_u8mf4_tu( @@ -346,7 +346,7 @@ void test_vluxseg5ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_tu( @@ -365,7 +365,7 @@ void test_vluxseg5ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_tu( @@ -384,7 +384,7 @@ void test_vluxseg5ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8m1_tu(v0, v1, v2, v3, v4, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_tu( @@ -403,7 +403,7 @@ void test_vluxseg5ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_tu( @@ -422,7 +422,7 @@ void test_vluxseg5ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_tu( @@ -441,7 +441,7 @@ void test_vluxseg5ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16m1_tu(vuint16m1_t *v0, 
vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_tu( @@ -460,7 +460,7 @@ void test_vluxseg5ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_tu( @@ -479,7 +479,7 @@ void test_vluxseg5ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_tu( @@ -498,7 +498,7 @@ void test_vluxseg5ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_tum( @@ -517,7 +517,7 @@ void test_vluxseg5ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_tum( @@ -536,7 +536,7 @@ void test_vluxseg5ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, 
vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_tum( @@ -555,7 +555,7 @@ void test_vluxseg5ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_tum( @@ -574,7 +574,7 @@ void test_vluxseg5ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_tum( @@ -593,7 +593,7 @@ void test_vluxseg5ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_tum( @@ -612,7 +612,7 @@ void test_vluxseg5ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_tum( @@ -631,7 +631,7 @@ void test_vluxseg5ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t 
maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_tum( @@ -650,7 +650,7 @@ void test_vluxseg5ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_tum( @@ -669,7 +669,7 @@ void test_vluxseg5ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_tum( 
@@ -688,7 +688,7 @@ void test_vluxseg5ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_tum( @@ -707,7 +707,7 @@ void test_vluxseg5ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_tum( @@ -726,7 +726,7 @@ void test_vluxseg5ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - 
return vluxseg5ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_tum( @@ -745,7 +745,7 @@ void test_vluxseg5ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_tum( @@ -764,7 +764,7 @@ void test_vluxseg5ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_tum( @@ -783,7 +783,7 @@ void test_vluxseg5ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, 
vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_tum( @@ -802,7 +802,7 @@ void test_vluxseg5ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_tum( @@ -821,7 +821,7 @@ void test_vluxseg5ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_tum( @@ -840,7 +840,7 @@ void test_vluxseg5ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_tum( @@ -859,7 +859,7 @@ void test_vluxseg5ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_tum( @@ -878,7 +878,7 @@ void test_vluxseg5ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8m1_tum(vuint8m1_t *v0, 
vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_tum( @@ -897,7 +897,7 @@ void test_vluxseg5ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_tum( @@ -916,7 +916,7 @@ void test_vluxseg5ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vluxseg5ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_tum( @@ -935,7 +935,7 @@ void test_vluxseg5ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_tum( @@ -954,7 +954,7 @@ void test_vluxseg5ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_tum( @@ -973,7 +973,7 @@ void test_vluxseg5ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, 
vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_tum( @@ -992,7 +992,7 @@ void test_vluxseg5ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_tumu( @@ -1011,7 +1011,7 @@ void test_vluxseg5ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vluxseg5ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_tumu( @@ -1030,7 +1030,7 @@ void test_vluxseg5ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_tumu( @@ -1049,7 +1049,7 @@ void test_vluxseg5ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_tumu( @@ -1068,7 +1068,7 @@ void test_vluxseg5ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f32mf2_tumu(vfloat32mf2_t 
*v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_tumu( @@ -1087,7 +1087,7 @@ void test_vluxseg5ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_tumu( @@ -1106,7 +1106,7 @@ void test_vluxseg5ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_tumu( @@ -1125,7 +1125,7 @@ void test_vluxseg5ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_tumu( @@ -1144,7 +1144,7 @@ void test_vluxseg5ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_tumu( @@ -1163,7 +1163,7 @@ void test_vluxseg5ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_tumu( @@ -1182,7 +1182,7 @@ void test_vluxseg5ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_tumu( @@ -1201,7 +1201,7 @@ void test_vluxseg5ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_tumu( @@ -1220,7 +1220,7 @@ void test_vluxseg5ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_tumu( @@ -1239,7 +1239,7 @@ void test_vluxseg5ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_tumu( @@ -1258,7 +1258,7 @@ void test_vluxseg5ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t 
maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_tumu( @@ -1277,7 +1277,7 @@ void test_vluxseg5ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_tumu( @@ -1296,7 +1296,7 @@ void test_vluxseg5ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_tumu( @@ -1315,7 +1315,7 @@ void test_vluxseg5ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_tumu( @@ -1334,7 +1334,7 @@ void test_vluxseg5ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_tumu( @@ -1353,7 +1353,7 @@ void test_vluxseg5ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t 
maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_tumu( @@ -1372,7 +1372,7 @@ void test_vluxseg5ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_tumu( @@ -1391,7 +1391,7 @@ void test_vluxseg5ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_tumu( @@ -1410,7 +1410,7 @@ void test_vluxseg5ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_tumu( @@ -1429,7 +1429,7 @@ void test_vluxseg5ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_tumu( @@ -1448,7 +1448,7 @@ void test_vluxseg5ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t 
maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_tumu( @@ -1467,7 +1467,7 @@ void test_vluxseg5ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_tumu( @@ -1486,7 +1486,7 @@ void test_vluxseg5ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_mu( @@ -1505,7 +1505,7 @@ void test_vluxseg5ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_mu( @@ -1524,7 +1524,7 @@ void test_vluxseg5ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_mu( @@ -1543,7 +1543,7 @@ void test_vluxseg5ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, 
vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_mu( @@ -1562,7 +1562,7 @@ void test_vluxseg5ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_mu( @@ -1581,7 +1581,7 @@ void test_vluxseg5ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_mu( @@ -1600,7 +1600,7 @@ void test_vluxseg5ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_mu( @@ -1619,7 +1619,7 @@ void test_vluxseg5ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_mu( @@ -1638,7 +1638,7 @@ void test_vluxseg5ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t 
maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_mu( @@ -1657,7 +1657,7 @@ void test_vluxseg5ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_mu( @@ -1676,7 +1676,7 @@ void test_vluxseg5ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_mu( @@ -1695,7 +1695,7 @@ void test_vluxseg5ei16_v_i8m1_mu(vint8m1_t 
*v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_mu( @@ -1714,7 +1714,7 @@ void test_vluxseg5ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_mu( @@ -1733,7 +1733,7 @@ void test_vluxseg5ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i16m1_mu(v0, v1, v2, v3, v4, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_mu( @@ -1752,7 +1752,7 @@ void test_vluxseg5ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_mu( @@ -1771,7 +1771,7 @@ void test_vluxseg5ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_mu( @@ -1790,7 +1790,7 @@ void test_vluxseg5ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_mu( @@ -1809,7 +1809,7 @@ void test_vluxseg5ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_mu( @@ -1828,7 +1828,7 @@ void test_vluxseg5ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + 
return __riscv_vluxseg5ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_mu( @@ -1847,7 +1847,7 @@ void test_vluxseg5ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_mu( @@ -1866,7 +1866,7 @@ void test_vluxseg5ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_mu( @@ -1885,7 +1885,7 @@ void test_vluxseg5ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, 
vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_mu( @@ -1904,7 +1904,7 @@ void test_vluxseg5ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_mu( @@ -1923,7 +1923,7 @@ void test_vluxseg5ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u16m1_mu(v0, v1, v2, v3, v4, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_mu( @@ -1942,7 +1942,7 @@ void test_vluxseg5ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_mu( @@ -1961,7 +1961,7 @@ void test_vluxseg5ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_mu( @@ -1980,6 +1980,6 @@ void test_vluxseg5ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, 
vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei16_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei32.c index fce2213b50ae..e00e95edb8a9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei32.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_tu( @@ -42,7 +42,7 @@ void test_vluxseg5ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 
*base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_tu( @@ -61,7 +61,7 @@ void test_vluxseg5ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_tu( @@ -80,7 +80,7 @@ void test_vluxseg5ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_tu( @@ -99,7 +99,7 @@ void test_vluxseg5ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, 
vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_tu( @@ -118,7 +118,7 @@ void test_vluxseg5ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_tu( @@ -137,7 +137,7 @@ void test_vluxseg5ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vluxseg5ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_tu( @@ -156,7 +156,7 @@ void test_vluxseg5ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_tu( @@ -175,7 +175,7 @@ void test_vluxseg5ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_tu( @@ -194,7 +194,7 @@ void test_vluxseg5ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, 
vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_tu( @@ -213,7 +213,7 @@ void test_vluxseg5ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_tu( @@ -232,7 +232,7 @@ void test_vluxseg5ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_tu( @@ -251,7 +251,7 @@ void test_vluxseg5ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t 
*v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_tu( @@ -270,7 +270,7 @@ void test_vluxseg5ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_tu( @@ -289,7 +289,7 @@ void test_vluxseg5ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vluxseg5ei32_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_tu( @@ -308,7 +308,7 @@ void test_vluxseg5ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_tu( @@ -327,7 +327,7 @@ void test_vluxseg5ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_tu( @@ -346,7 +346,7 @@ void test_vluxseg5ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t 
maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_tu( @@ -365,7 +365,7 @@ void test_vluxseg5ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_tu( @@ -384,7 +384,7 @@ void test_vluxseg5ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_tu( @@ -403,7 +403,7 @@ void test_vluxseg5ei32_v_u8m1_tu(vuint8m1_t *v0, 
vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_tu( @@ -422,7 +422,7 @@ void test_vluxseg5ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_tu( @@ -441,7 +441,7 @@ void test_vluxseg5ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_tu( @@ -460,7 +460,7 @@ void test_vluxseg5ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_tu( @@ -479,7 +479,7 @@ void test_vluxseg5ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_tu( @@ -498,7 +498,7 @@ void test_vluxseg5ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, 
vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_tum( @@ -517,7 +517,7 @@ void test_vluxseg5ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_tum( @@ -536,7 +536,7 @@ void test_vluxseg5ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_tum( @@ -555,7 +555,7 @@ void test_vluxseg5ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_tum( @@ -574,7 +574,7 @@ void test_vluxseg5ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_tum( @@ -593,7 +593,7 @@ void test_vluxseg5ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, 
vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_tum( @@ -612,7 +612,7 @@ void test_vluxseg5ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_tum( @@ -631,7 +631,7 @@ void test_vluxseg5ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_tum( @@ -650,7 +650,7 @@ void test_vluxseg5ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_tum( @@ -669,7 +669,7 @@ void test_vluxseg5ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_tum( @@ -688,7 +688,7 @@ void test_vluxseg5ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, 
const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_tum( @@ -707,7 +707,7 @@ void test_vluxseg5ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_tum( @@ -726,7 +726,7 @@ void test_vluxseg5ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_tum( @@ -745,7 +745,7 @@ void 
test_vluxseg5ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_tum( @@ -764,7 +764,7 @@ void test_vluxseg5ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_tum( @@ -783,7 +783,7 @@ void test_vluxseg5ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return 
vluxseg5ei32_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_tum( @@ -802,7 +802,7 @@ void test_vluxseg5ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_tum( @@ -821,7 +821,7 @@ void test_vluxseg5ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_tum( @@ -840,7 +840,7 @@ void test_vluxseg5ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_tum( @@ -859,7 +859,7 @@ void test_vluxseg5ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_tum( @@ -878,7 +878,7 @@ void test_vluxseg5ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_tum( @@ -897,7 +897,7 @@ void test_vluxseg5ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_tum( @@ -916,7 +916,7 @@ void test_vluxseg5ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_tum( @@ -935,7 +935,7 @@ void test_vluxseg5ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_tum( @@ -954,7 +954,7 @@ void test_vluxseg5ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_tum( @@ -973,7 +973,7 @@ void test_vluxseg5ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_tum( @@ -992,7 +992,7 @@ void test_vluxseg5ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_tumu( @@ -1011,7 +1011,7 @@ void test_vluxseg5ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_tumu( @@ -1030,7 +1030,7 @@ void test_vluxseg5ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_tumu( @@ -1049,7 +1049,7 @@ void test_vluxseg5ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_tumu( @@ -1068,7 +1068,7 @@ void test_vluxseg5ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_tumu( @@ -1087,7 +1087,7 @@ void test_vluxseg5ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_tumu( @@ -1106,7 +1106,7 @@ void test_vluxseg5ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_tumu( @@ -1125,7 +1125,7 @@ void test_vluxseg5ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // 
CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_tumu( @@ -1144,7 +1144,7 @@ void test_vluxseg5ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_tumu( @@ -1163,7 +1163,7 @@ void test_vluxseg5ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_tumu( @@ -1182,7 +1182,7 @@ void test_vluxseg5ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_tumu( @@ -1201,7 +1201,7 @@ void test_vluxseg5ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_tumu( @@ -1220,7 +1220,7 @@ void test_vluxseg5ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, 
vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_tumu( @@ -1239,7 +1239,7 @@ void test_vluxseg5ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_tumu( @@ -1258,7 +1258,7 @@ void test_vluxseg5ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vluxseg5ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_tumu( @@ -1277,7 +1277,7 @@ void test_vluxseg5ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_tumu( @@ -1296,7 +1296,7 @@ void test_vluxseg5ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_tumu( @@ -1315,7 +1315,7 @@ void test_vluxseg5ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, 
vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_tumu( @@ -1334,7 +1334,7 @@ void test_vluxseg5ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_tumu( @@ -1353,7 +1353,7 @@ void test_vluxseg5ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_tumu( @@ -1372,7 +1372,7 @@ void test_vluxseg5ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_tumu( @@ -1391,7 +1391,7 @@ void test_vluxseg5ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_tumu( @@ -1410,7 +1410,7 @@ void test_vluxseg5ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, 
vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_tumu( @@ -1429,7 +1429,7 @@ void test_vluxseg5ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_tumu( @@ -1448,7 +1448,7 @@ void test_vluxseg5ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_tumu( @@ -1467,7 +1467,7 @@ void test_vluxseg5ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_tumu( @@ -1486,7 +1486,7 @@ void test_vluxseg5ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_mu( @@ -1505,7 +1505,7 @@ void test_vluxseg5ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t 
maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_mu( @@ -1524,7 +1524,7 @@ void test_vluxseg5ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_mu( @@ -1543,7 +1543,7 @@ void test_vluxseg5ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_mu( @@ -1562,7 +1562,7 @@ void test_vluxseg5ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_mu( @@ -1581,7 +1581,7 @@ void test_vluxseg5ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_mu( @@ -1600,7 +1600,7 @@ void test_vluxseg5ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, 
vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_mu( @@ -1619,7 +1619,7 @@ void test_vluxseg5ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_mu( @@ -1638,7 +1638,7 @@ void test_vluxseg5ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_mu( @@ -1657,7 +1657,7 @@ void test_vluxseg5ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_mu( @@ -1676,7 +1676,7 @@ void test_vluxseg5ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_mu( @@ -1695,7 +1695,7 @@ void test_vluxseg5ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t 
*base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_mu( @@ -1714,7 +1714,7 @@ void test_vluxseg5ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_mu( @@ -1733,7 +1733,7 @@ void test_vluxseg5ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_mu( @@ -1752,7 +1752,7 @@ void 
test_vluxseg5ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_mu( @@ -1771,7 +1771,7 @@ void test_vluxseg5ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_mu( @@ -1790,7 +1790,7 @@ void test_vluxseg5ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return 
vluxseg5ei32_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_mu( @@ -1809,7 +1809,7 @@ void test_vluxseg5ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_mu( @@ -1828,7 +1828,7 @@ void test_vluxseg5ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_mu( @@ -1847,7 +1847,7 @@ void test_vluxseg5ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_mu( @@ -1866,7 +1866,7 @@ void test_vluxseg5ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_mu( @@ -1885,7 +1885,7 @@ void test_vluxseg5ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_mu( @@ -1904,7 +1904,7 @@ void test_vluxseg5ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_mu( @@ -1923,7 +1923,7 @@ void test_vluxseg5ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_mu( @@ -1942,7 +1942,7 @@ void test_vluxseg5ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_mu( @@ -1961,7 +1961,7 @@ void test_vluxseg5ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_mu( @@ -1980,6 +1980,6 @@ void test_vluxseg5ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei32_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei64.c index 4164765a7724..5eed867fc988 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei64.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_tu( @@ -42,7 +42,7 @@ void test_vluxseg5ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_tu( @@ -61,7 +61,7 @@ void test_vluxseg5ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_tu( @@ -80,7 +80,7 @@ void test_vluxseg5ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_tu( @@ -99,7 +99,7 @@ void test_vluxseg5ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, 
const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_tu( @@ -118,7 +118,7 @@ void test_vluxseg5ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_tu( @@ -137,7 +137,7 @@ void test_vluxseg5ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_tu( @@ -156,7 +156,7 @@ void test_vluxseg5ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // 
CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_tu( @@ -175,7 +175,7 @@ void test_vluxseg5ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_tu( @@ -194,7 +194,7 @@ void test_vluxseg5ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_tu( @@ -213,7 +213,7 @@ void test_vluxseg5ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_tu( @@ -232,7 +232,7 @@ void test_vluxseg5ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_tu( @@ -251,7 +251,7 @@ void test_vluxseg5ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t 
*base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_tu( @@ -270,7 +270,7 @@ void test_vluxseg5ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_tu( @@ -289,7 +289,7 @@ void test_vluxseg5ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_tu( @@ -308,7 +308,7 @@ void test_vluxseg5ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // 
void test_vluxseg5ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_tu( @@ -327,7 +327,7 @@ void test_vluxseg5ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_tu( @@ -346,7 +346,7 @@ void test_vluxseg5ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_tu( @@ -365,7 +365,7 @@ void test_vluxseg5ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_tu( @@ -384,7 +384,7 @@ void test_vluxseg5ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_tu( @@ -403,7 +403,7 @@ void test_vluxseg5ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const 
uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_tu( @@ -422,7 +422,7 @@ void test_vluxseg5ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_tu( @@ -441,7 +441,7 @@ void test_vluxseg5ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_tu( @@ -460,7 +460,7 @@ void test_vluxseg5ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t 
// CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_tu( @@ -479,7 +479,7 @@ void test_vluxseg5ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_tu( @@ -498,7 +498,7 @@ void test_vluxseg5ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vluxseg5ei64_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_tum( @@ -517,7 +517,7 @@ void test_vluxseg5ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_tum( @@ -536,7 +536,7 @@ void test_vluxseg5ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_tum( @@ -555,7 +555,7 @@ void test_vluxseg5ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16m1_tum(vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_tum( @@ -574,7 +574,7 @@ void test_vluxseg5ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_tum( @@ -593,7 +593,7 @@ void test_vluxseg5ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, 
vl); + return __riscv_vluxseg5ei64_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_tum( @@ -612,7 +612,7 @@ void test_vluxseg5ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_tum( @@ -631,7 +631,7 @@ void test_vluxseg5ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_tum( @@ -650,7 +650,7 @@ void test_vluxseg5ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t 
*v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_tum( @@ -669,7 +669,7 @@ void test_vluxseg5ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_tum( @@ -688,7 +688,7 @@ void test_vluxseg5ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_tum( @@ -707,7 +707,7 @@ void test_vluxseg5ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_tum( @@ -726,7 +726,7 @@ void test_vluxseg5ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_tum( @@ -745,7 +745,7 @@ void test_vluxseg5ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, 
vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_tum( @@ -764,7 +764,7 @@ void test_vluxseg5ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_tum( @@ -783,7 +783,7 @@ void test_vluxseg5ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg5ei64_v_i64m1_tum( @@ -802,7 +802,7 @@ void test_vluxseg5ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_tum( @@ -821,7 +821,7 @@ void test_vluxseg5ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_tum( @@ -840,7 +840,7 @@ void test_vluxseg5ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, 
vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_tum( @@ -859,7 +859,7 @@ void test_vluxseg5ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_tum( @@ -878,7 +878,7 @@ void test_vluxseg5ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_tum( @@ -897,7 +897,7 @@ void test_vluxseg5ei64_v_u8m1_tum(vuint8m1_t *v0, 
vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_tum( @@ -916,7 +916,7 @@ void test_vluxseg5ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_tum( @@ -935,7 +935,7 @@ void test_vluxseg5ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return 
vluxseg5ei64_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_tum( @@ -954,7 +954,7 @@ void test_vluxseg5ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_tum( @@ -973,7 +973,7 @@ void test_vluxseg5ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_tum( @@ -992,7 +992,7 @@ void test_vluxseg5ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t 
*v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_tumu( @@ -1011,7 +1011,7 @@ void test_vluxseg5ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_tumu( @@ -1030,7 +1030,7 @@ void test_vluxseg5ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return 
vluxseg5ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_tumu( @@ -1049,7 +1049,7 @@ void test_vluxseg5ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_tumu( @@ -1068,7 +1068,7 @@ void test_vluxseg5ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_tumu( @@ -1087,7 +1087,7 @@ void 
test_vluxseg5ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_tumu( @@ -1106,7 +1106,7 @@ void test_vluxseg5ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_tumu( @@ -1125,7 +1125,7 @@ void test_vluxseg5ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t 
vl) { - return vluxseg5ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_tumu( @@ -1144,7 +1144,7 @@ void test_vluxseg5ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_tumu( @@ -1163,7 +1163,7 @@ void test_vluxseg5ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_tumu( @@ -1182,7 +1182,7 @@ void test_vluxseg5ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_tumu( @@ -1201,7 +1201,7 @@ void test_vluxseg5ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_tumu( @@ -1220,7 +1220,7 @@ void test_vluxseg5ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_tumu( @@ -1239,7 +1239,7 @@ void test_vluxseg5ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_tumu( @@ -1258,7 +1258,7 @@ void test_vluxseg5ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_tumu( @@ -1277,7 +1277,7 @@ void test_vluxseg5ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_tumu( @@ -1296,7 +1296,7 @@ void test_vluxseg5ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_tumu( @@ -1315,7 +1315,7 @@ void test_vluxseg5ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, 
bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_tumu( @@ -1334,7 +1334,7 @@ void test_vluxseg5ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_tumu( @@ -1353,7 +1353,7 @@ void test_vluxseg5ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_tumu( @@ -1372,7 +1372,7 @@ void test_vluxseg5ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, 
vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_tumu( @@ -1391,7 +1391,7 @@ void test_vluxseg5ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_tumu( @@ -1410,7 +1410,7 @@ void test_vluxseg5ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vluxseg5ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_tumu( @@ -1429,7 +1429,7 @@ void test_vluxseg5ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_tumu( @@ -1448,7 +1448,7 @@ void test_vluxseg5ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_tumu( @@ -1467,7 +1467,7 @@ void test_vluxseg5ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_tumu( @@ -1486,7 +1486,7 @@ void test_vluxseg5ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_mu( @@ -1505,7 +1505,7 @@ void test_vluxseg5ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vluxseg5ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_mu( @@ -1524,7 +1524,7 @@ void test_vluxseg5ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_mu( @@ -1543,7 +1543,7 @@ void test_vluxseg5ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_mu( @@ -1562,7 +1562,7 @@ void test_vluxseg5ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t 
*v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_mu( @@ -1581,7 +1581,7 @@ void test_vluxseg5ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_mu( @@ -1600,7 +1600,7 @@ void test_vluxseg5ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vluxseg5ei64_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_mu( @@ -1619,7 +1619,7 @@ void test_vluxseg5ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_mu( @@ -1638,7 +1638,7 @@ void test_vluxseg5ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_mu( @@ -1657,7 +1657,7 @@ void test_vluxseg5ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, 
vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_mu( @@ -1676,7 +1676,7 @@ void test_vluxseg5ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_mu( @@ -1695,7 +1695,7 @@ void test_vluxseg5ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_mu( @@ -1714,7 +1714,7 @@ void test_vluxseg5ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_mu( @@ -1733,7 +1733,7 @@ void test_vluxseg5ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_mu( @@ -1752,7 +1752,7 @@ void test_vluxseg5ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, 
const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_mu( @@ -1771,7 +1771,7 @@ void test_vluxseg5ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_mu( @@ -1790,7 +1790,7 @@ void test_vluxseg5ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_mu( @@ -1809,7 +1809,7 @@ void 
test_vluxseg5ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_mu( @@ -1828,7 +1828,7 @@ void test_vluxseg5ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_mu( @@ -1847,7 +1847,7 @@ void test_vluxseg5ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return 
vluxseg5ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_mu( @@ -1866,7 +1866,7 @@ void test_vluxseg5ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_mu( @@ -1885,7 +1885,7 @@ void test_vluxseg5ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_mu( @@ -1904,7 +1904,7 @@ void test_vluxseg5ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // 
CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_mu( @@ -1923,7 +1923,7 @@ void test_vluxseg5ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_mu( @@ -1942,7 +1942,7 @@ void test_vluxseg5ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_mu( @@ -1961,7 +1961,7 @@ void test_vluxseg5ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_mu( @@ -1980,6 +1980,6 @@ void test_vluxseg5ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei64_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei8.c index 
cc36b9f80cba..f01887b3031e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg5ei8.c @@ -23,7 +23,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_tu( @@ -42,7 +42,7 @@ void test_vluxseg5ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_tu( @@ -61,7 +61,7 @@ void test_vluxseg5ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, 
vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_tu( @@ -80,7 +80,7 @@ void test_vluxseg5ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_tu( @@ -99,7 +99,7 @@ void test_vluxseg5ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_tu( @@ -118,7 +118,7 @@ void 
test_vluxseg5ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_tu( @@ -137,7 +137,7 @@ void test_vluxseg5ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_tu( @@ -156,7 +156,7 @@ void test_vluxseg5ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_tu( @@ -175,7 +175,7 @@ void test_vluxseg5ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_tu( @@ -194,7 +194,7 @@ void test_vluxseg5ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_tu( @@ -213,7 +213,7 @@ void test_vluxseg5ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, 
vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_tu( @@ -232,7 +232,7 @@ void test_vluxseg5ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_tu( @@ -251,7 +251,7 @@ void test_vluxseg5ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_tu( @@ -270,7 +270,7 @@ void test_vluxseg5ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t 
*v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_tu( @@ -289,7 +289,7 @@ void test_vluxseg5ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_tu( @@ -308,7 +308,7 @@ void test_vluxseg5ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i64m1_tu(v0, 
v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_tu( @@ -327,7 +327,7 @@ void test_vluxseg5ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_tu( @@ -346,7 +346,7 @@ void test_vluxseg5ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_tu( @@ -365,7 +365,7 @@ void test_vluxseg5ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, 
vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_tu( @@ -384,7 +384,7 @@ void test_vluxseg5ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_tu( @@ -403,7 +403,7 @@ void test_vluxseg5ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_tu( @@ -422,7 +422,7 @@ void test_vluxseg5ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 
// CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_tu( @@ -441,7 +441,7 @@ void test_vluxseg5ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_tu( @@ -460,7 +460,7 @@ void test_vluxseg5ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return 
__riscv_vluxseg5ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_tu( @@ -479,7 +479,7 @@ void test_vluxseg5ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u32m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_tu( @@ -498,7 +498,7 @@ void test_vluxseg5ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_tum( @@ -517,7 +517,7 @@ void test_vluxseg5ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, 
vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_tum( @@ -536,7 +536,7 @@ void test_vluxseg5ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_tum( @@ -555,7 +555,7 @@ void test_vluxseg5ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_tum( @@ -574,7 +574,7 @@ void test_vluxseg5ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_tum( @@ -593,7 +593,7 @@ void test_vluxseg5ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_tum( @@ -612,7 +612,7 @@ void test_vluxseg5ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t 
maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_tum( @@ -631,7 +631,7 @@ void test_vluxseg5ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_tum( @@ -650,7 +650,7 @@ void test_vluxseg5ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg5ei8_v_i8mf2_tum( @@ -669,7 +669,7 @@ void test_vluxseg5ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_tum( @@ -688,7 +688,7 @@ void test_vluxseg5ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_tum( @@ -707,7 +707,7 @@ void test_vluxseg5ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - 
return vluxseg5ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_tum( @@ -726,7 +726,7 @@ void test_vluxseg5ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_tum( @@ -745,7 +745,7 @@ void test_vluxseg5ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_tum( @@ -764,7 +764,7 @@ void test_vluxseg5ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // 
CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_tum( @@ -783,7 +783,7 @@ void test_vluxseg5ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_tum( @@ -802,7 +802,7 @@ void test_vluxseg5ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
base, bindex, vl); + return __riscv_vluxseg5ei8_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_tum( @@ -821,7 +821,7 @@ void test_vluxseg5ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_tum( @@ -840,7 +840,7 @@ void test_vluxseg5ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_tum( @@ -859,7 +859,7 @@ void test_vluxseg5ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, 
vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_tum( @@ -878,7 +878,7 @@ void test_vluxseg5ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_tum( @@ -897,7 +897,7 @@ void test_vluxseg5ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_tum( @@ -916,7 +916,7 @@ void test_vluxseg5ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_tum( @@ -935,7 +935,7 @@ void test_vluxseg5ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_tum( @@ -954,7 +954,7 @@ void test_vluxseg5ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, 
vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_tum( @@ -973,7 +973,7 @@ void test_vluxseg5ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_tum( @@ -992,7 +992,7 @@ void test_vluxseg5ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_tumu( @@ -1011,7 +1011,7 @@ void test_vluxseg5ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_tumu( @@ -1030,7 +1030,7 @@ void test_vluxseg5ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_tumu( @@ -1049,7 +1049,7 @@ void test_vluxseg5ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t 
maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_tumu( @@ -1068,7 +1068,7 @@ void test_vluxseg5ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_tumu( @@ -1087,7 +1087,7 @@ void test_vluxseg5ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_tumu( @@ -1106,7 +1106,7 @@ void test_vluxseg5ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_tumu( @@ -1125,7 +1125,7 @@ void test_vluxseg5ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_tumu( @@ -1144,7 +1144,7 @@ void test_vluxseg5ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, 
vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_tumu( @@ -1163,7 +1163,7 @@ void test_vluxseg5ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_tumu( @@ -1182,7 +1182,7 @@ void test_vluxseg5ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_tumu( @@ -1201,7 +1201,7 @@ 
void test_vluxseg5ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_tumu( @@ -1220,7 +1220,7 @@ void test_vluxseg5ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_tumu( @@ -1239,7 +1239,7 @@ void test_vluxseg5ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - 
return vluxseg5ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_tumu( @@ -1258,7 +1258,7 @@ void test_vluxseg5ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_tumu( @@ -1277,7 +1277,7 @@ void test_vluxseg5ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_tumu( @@ -1296,7 +1296,7 @@ void test_vluxseg5ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, 
vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_tumu( @@ -1315,7 +1315,7 @@ void test_vluxseg5ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_tumu( @@ -1334,7 +1334,7 @@ void test_vluxseg5ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_tumu( @@ -1353,7 +1353,7 @@ void test_vluxseg5ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_tumu( @@ -1372,7 +1372,7 @@ void test_vluxseg5ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_tumu( @@ -1391,7 +1391,7 @@ void test_vluxseg5ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_tumu( @@ -1410,7 +1410,7 @@ void test_vluxseg5ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_tumu( @@ -1429,7 +1429,7 @@ void test_vluxseg5ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_tumu( @@ -1448,7 +1448,7 @@ void test_vluxseg5ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_tumu( @@ -1467,7 +1467,7 @@ void test_vluxseg5ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_tumu( @@ -1486,7 +1486,7 @@ void test_vluxseg5ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_mu( @@ -1505,7 +1505,7 @@ void test_vluxseg5ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_mu( @@ -1524,7 +1524,7 @@ void test_vluxseg5ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_mu( @@ -1543,7 +1543,7 @@ void test_vluxseg5ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_mu( @@ -1562,7 +1562,7 @@ void test_vluxseg5ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_mu( @@ -1581,7 +1581,7 @@ void test_vluxseg5ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_mu( @@ -1600,7 +1600,7 @@ void test_vluxseg5ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_mu( @@ -1619,7 +1619,7 @@ void test_vluxseg5ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, 
vl); + return __riscv_vluxseg5ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_mu( @@ -1638,7 +1638,7 @@ void test_vluxseg5ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_mu( @@ -1657,7 +1657,7 @@ void test_vluxseg5ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_mu( @@ -1676,7 +1676,7 @@ void test_vluxseg5ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, 
vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_mu( @@ -1695,7 +1695,7 @@ void test_vluxseg5ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_mu( @@ -1714,7 +1714,7 @@ void test_vluxseg5ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_mu( @@ -1733,7 +1733,7 @@ void test_vluxseg5ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_mu( @@ -1752,7 +1752,7 @@ void test_vluxseg5ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_mu( @@ -1771,7 +1771,7 @@ void test_vluxseg5ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t 
*base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_mu( @@ -1790,7 +1790,7 @@ void test_vluxseg5ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_mu( @@ -1809,7 +1809,7 @@ void test_vluxseg5ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_mu( @@ -1828,7 +1828,7 @@ void test_vluxseg5ei8_v_u8mf8_mu(vuint8mf8_t *v0, 
vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_mu( @@ -1847,7 +1847,7 @@ void test_vluxseg5ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_mu( @@ -1866,7 +1866,7 @@ void test_vluxseg5ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_mu( @@ -1885,7 +1885,7 @@ void test_vluxseg5ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_mu( @@ -1904,7 +1904,7 @@ void test_vluxseg5ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_mu( @@ -1923,7 +1923,7 @@ void test_vluxseg5ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg5ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_mu( @@ -1942,7 +1942,7 @@ void test_vluxseg5ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_mu( @@ -1961,7 +1961,7 @@ void test_vluxseg5ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, 
bindex, vl); + return __riscv_vluxseg5ei8_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_mu( @@ -1980,6 +1980,6 @@ void test_vluxseg5ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg5ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); + return __riscv_vluxseg5ei8_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei16.c index f1b640aa6ab5..d3cc289b8e28 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei16.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_tu( @@ -46,7 +46,7 @@ void test_vluxseg6ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_tu( @@ -67,7 +67,7 @@ void test_vluxseg6ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_tu( @@ -88,7 +88,7 @@ void test_vluxseg6ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_tu( @@ -109,7 +109,7 @@ void test_vluxseg6ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_tu( @@ -130,7 +130,7 @@ void test_vluxseg6ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, 
vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_tu( @@ -151,7 +151,7 @@ void test_vluxseg6ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_tu( @@ -172,7 +172,7 @@ void test_vluxseg6ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg6ei16_v_i8mf2_tu( @@ -193,7 +193,7 @@ void test_vluxseg6ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_tu( @@ -214,7 +214,7 @@ void test_vluxseg6ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_tu( @@ -235,7 +235,7 @@ void test_vluxseg6ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t 
maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_tu( @@ -256,7 +256,7 @@ void test_vluxseg6ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_tu( @@ -277,7 +277,7 @@ void test_vluxseg6ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_tu( @@ -298,7 +298,7 @@ void test_vluxseg6ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_tu( @@ -319,7 +319,7 @@ void test_vluxseg6ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_tu( @@ -340,7 +340,7 @@ void test_vluxseg6ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, 
vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_tu( @@ -361,7 +361,7 @@ void test_vluxseg6ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_tu( @@ -382,7 +382,7 @@ void test_vluxseg6ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_tu( @@ -403,7 +403,7 @@ void test_vluxseg6ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_tu( @@ -424,7 +424,7 @@ void test_vluxseg6ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_tu( @@ -445,7 +445,7 @@ void test_vluxseg6ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret 
void // void test_vluxseg6ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_tu( @@ -466,7 +466,7 @@ void test_vluxseg6ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_tu( @@ -487,7 +487,7 @@ void test_vluxseg6ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, 
vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_tu( @@ -508,7 +508,7 @@ void test_vluxseg6ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_tu( @@ -529,7 +529,7 @@ void test_vluxseg6ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_tu( @@ -550,7 +550,7 @@ void test_vluxseg6ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_tum( @@ -571,7 +571,7 @@ void test_vluxseg6ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_tum( @@ -592,7 +592,7 @@ void test_vluxseg6ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, 
vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_tum( @@ -613,7 +613,7 @@ void test_vluxseg6ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vluxseg6ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t 
maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_tum( @@ -655,7 +655,7 @@ void test_vluxseg6ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_tum( @@ -676,7 +676,7 @@ void test_vluxseg6ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return 
__riscv_vluxseg6ei16_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_tum( @@ -697,7 +697,7 @@ void test_vluxseg6ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_tum( @@ -718,7 +718,7 @@ void test_vluxseg6ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_tum( @@ -739,7 +739,7 @@ void test_vluxseg6ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v 
// CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_tum( @@ -760,7 +760,7 @@ void test_vluxseg6ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_tum( @@ -781,7 +781,7 @@ void test_vluxseg6ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t 
maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_tum( @@ -802,7 +802,7 @@ void test_vluxseg6ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_tum( @@ -823,7 +823,7 @@ void test_vluxseg6ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vluxseg6ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_tum( @@ -865,7 +865,7 @@ void test_vluxseg6ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_tum( @@ -886,7 +886,7 @@ void test_vluxseg6ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_tum( @@ -907,7 +907,7 @@ void test_vluxseg6ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_tum( @@ -928,7 +928,7 @@ void test_vluxseg6ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, 
const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vluxseg6ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_tum( @@ -970,7 +970,7 @@ void test_vluxseg6ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_tum( @@ -991,7 +991,7 @@ void test_vluxseg6ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_tum( @@ -1012,7 +1012,7 @@ void test_vluxseg6ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_tum( @@ -1033,7 +1033,7 @@ void test_vluxseg6ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // 
CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vluxseg6ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_tum( @@ -1075,7 +1075,7 @@ void test_vluxseg6ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, 
vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_tum( @@ -1096,7 +1096,7 @@ void test_vluxseg6ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_tumu( @@ -1117,7 +1117,7 @@ void test_vluxseg6ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_tumu( @@ -1138,7 +1138,7 @@ void test_vluxseg6ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void test_vluxseg6ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg6ei16_v_f32mf2_tumu( @@ -1180,7 +1180,7 @@ void test_vluxseg6ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_tumu( @@ -1201,7 +1201,7 @@ void test_vluxseg6ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_tumu( @@ -1222,7 +1222,7 @@ void test_vluxseg6ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t 
*v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_tumu( @@ -1243,7 +1243,7 @@ void test_vluxseg6ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_tumu( @@ -1264,7 +1264,7 @@ void test_vluxseg6ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { 
- return vluxseg6ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_tumu( @@ -1285,7 +1285,7 @@ void test_vluxseg6ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_tumu( @@ -1306,7 +1306,7 @@ void test_vluxseg6ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_tumu( @@ -1327,7 +1327,7 @@ void test_vluxseg6ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_tumu( @@ -1348,7 +1348,7 @@ void test_vluxseg6ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vluxseg6ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_tumu( @@ -1390,7 +1390,7 @@ void test_vluxseg6ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_tumu( @@ -1411,7 +1411,7 @@ void test_vluxseg6ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - 
return vluxseg6ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_tumu( @@ -1432,7 +1432,7 @@ void test_vluxseg6ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_tumu( @@ -1453,7 +1453,7 @@ void test_vluxseg6ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_tumu( @@ -1474,7 +1474,7 @@ void test_vluxseg6ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_tumu( @@ -1495,7 +1495,7 @@ void test_vluxseg6ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_tumu( @@ -1516,7 +1516,7 @@ void test_vluxseg6ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_tumu( @@ -1537,7 +1537,7 @@ void test_vluxseg6ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_tumu( @@ -1558,7 +1558,7 @@ void test_vluxseg6ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t 
maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vluxseg6ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_tumu( @@ -1600,7 +1600,7 @@ void test_vluxseg6ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + 
return __riscv_vluxseg6ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_tumu( @@ -1621,7 +1621,7 @@ void test_vluxseg6ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_tumu( @@ -1642,7 +1642,7 @@ void test_vluxseg6ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_mu( @@ -1663,7 +1663,7 @@ void 
test_vluxseg6ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_mu( @@ -1684,7 +1684,7 @@ void test_vluxseg6ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_mu( @@ -1705,7 +1705,7 @@ void test_vluxseg6ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t 
*v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_mu( @@ -1726,7 +1726,7 @@ void test_vluxseg6ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_mu( @@ -1747,7 +1747,7 @@ void test_vluxseg6ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - 
return vluxseg6ei16_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_mu( @@ -1768,7 +1768,7 @@ void test_vluxseg6ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_mu( @@ -1789,7 +1789,7 @@ void test_vluxseg6ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_mu( @@ -1810,7 +1810,7 @@ void test_vluxseg6ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_mu( @@ -1831,7 +1831,7 @@ void test_vluxseg6ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_mu( @@ -1852,7 +1852,7 @@ void test_vluxseg6ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, 
vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_mu( @@ -1873,7 +1873,7 @@ void test_vluxseg6ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_mu( @@ -1894,7 +1894,7 @@ void test_vluxseg6ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, 
v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_mu( @@ -1915,7 +1915,7 @@ void test_vluxseg6ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_mu( @@ -1936,7 +1936,7 @@ void test_vluxseg6ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg6ei16_v_i32m1_mu( @@ -1957,7 +1957,7 @@ void test_vluxseg6ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_mu( @@ -1978,7 +1978,7 @@ void test_vluxseg6ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_mu( @@ -1999,7 +1999,7 @@ void test_vluxseg6ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t 
mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_mu( @@ -2020,7 +2020,7 @@ void test_vluxseg6ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_mu( @@ -2041,7 +2041,7 @@ void test_vluxseg6ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_mu( @@ -2062,7 +2062,7 @@ void test_vluxseg6ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_mu( @@ -2083,7 +2083,7 @@ void test_vluxseg6ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg6ei16_v_u16mf2_mu( @@ -2104,7 +2104,7 @@ void test_vluxseg6ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_mu( @@ -2125,7 +2125,7 @@ void test_vluxseg6ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_mu( @@ -2146,7 +2146,7 @@ void test_vluxseg6ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t 
*v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_mu( @@ -2167,7 +2167,7 @@ void test_vluxseg6ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_mu( @@ -2188,6 +2188,6 @@ void test_vluxseg6ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vluxseg6ei16_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei16_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei32.c index fa32b64edd51..b6410a8617cc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei32.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_tu( @@ -46,7 +46,7 @@ void test_vluxseg6ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t 
vl) { - return vluxseg6ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_tu( @@ -67,7 +67,7 @@ void test_vluxseg6ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_tu( @@ -88,7 +88,7 @@ void test_vluxseg6ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_tu( @@ -109,7 +109,7 @@ void test_vluxseg6ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_tu( @@ -130,7 +130,7 @@ void test_vluxseg6ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_tu( @@ -151,7 +151,7 @@ void test_vluxseg6ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t 
maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_tu( @@ -172,7 +172,7 @@ void test_vluxseg6ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_tu( @@ -193,7 +193,7 @@ void test_vluxseg6ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf2_tu(v0, 
v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_tu( @@ -214,7 +214,7 @@ void test_vluxseg6ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_tu( @@ -235,7 +235,7 @@ void test_vluxseg6ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_tu( @@ -256,7 +256,7 @@ void test_vluxseg6ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, 
vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_tu( @@ -277,7 +277,7 @@ void test_vluxseg6ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_tu( @@ -298,7 +298,7 @@ void test_vluxseg6ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_tu( @@ -319,7 +319,7 @@ void test_vluxseg6ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_tu( @@ -340,7 +340,7 @@ void test_vluxseg6ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_tu( @@ -361,7 +361,7 @@ void test_vluxseg6ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, 
vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_tu( @@ -382,7 +382,7 @@ void test_vluxseg6ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_tu( @@ -403,7 +403,7 @@ void test_vluxseg6ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, 
vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_tu( @@ -424,7 +424,7 @@ void test_vluxseg6ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_tu( @@ -445,7 +445,7 @@ void test_vluxseg6ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_tu( @@ -466,7 +466,7 @@ void test_vluxseg6ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_tu( @@ -487,7 +487,7 @@ void test_vluxseg6ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_tu( @@ -508,7 +508,7 @@ void test_vluxseg6ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, 
vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_tu( @@ -529,7 +529,7 @@ void test_vluxseg6ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_tu( @@ -550,7 +550,7 @@ void test_vluxseg6ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
bindex, vl); + return __riscv_vluxseg6ei32_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_tum( @@ -571,7 +571,7 @@ void test_vluxseg6ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_tum( @@ -592,7 +592,7 @@ void test_vluxseg6ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_tum( @@ 
-613,7 +613,7 @@ void test_vluxseg6ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vluxseg6ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_tum( @@ -655,7 +655,7 @@ void test_vluxseg6ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, 
vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_tum( @@ -676,7 +676,7 @@ void test_vluxseg6ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_tum( @@ -697,7 +697,7 @@ void test_vluxseg6ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return 
vluxseg6ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_tum( @@ -718,7 +718,7 @@ void test_vluxseg6ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_tum( @@ -739,7 +739,7 @@ void test_vluxseg6ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_tum( @@ -760,7 +760,7 @@ void test_vluxseg6ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_tum( @@ -781,7 +781,7 @@ void test_vluxseg6ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_tum( @@ -802,7 +802,7 @@ void test_vluxseg6ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, 
vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_tum( @@ -823,7 +823,7 @@ void test_vluxseg6ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vluxseg6ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_tum( @@ -865,7 +865,7 @@ void test_vluxseg6ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_tum( @@ -886,7 +886,7 @@ void test_vluxseg6ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg6ei32_v_u8mf8_tum( @@ -907,7 +907,7 @@ void test_vluxseg6ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_tum( @@ -928,7 +928,7 @@ void test_vluxseg6ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vluxseg6ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, 
vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_tum( @@ -970,7 +970,7 @@ void test_vluxseg6ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_tum( @@ -991,7 +991,7 @@ void test_vluxseg6ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, 
v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_tum( @@ -1012,7 +1012,7 @@ void test_vluxseg6ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_tum( @@ -1033,7 +1033,7 @@ void test_vluxseg6ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vluxseg6ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_tum( @@ -1075,7 +1075,7 @@ void test_vluxseg6ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_tum( @@ -1096,7 +1096,7 @@ void test_vluxseg6ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t 
*v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_tumu( @@ -1117,7 +1117,7 @@ void test_vluxseg6ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_tumu( @@ -1138,7 +1138,7 @@ void test_vluxseg6ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, 
vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void test_vluxseg6ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_tumu( @@ -1180,7 +1180,7 @@ void test_vluxseg6ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, 
vl); + return __riscv_vluxseg6ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_tumu( @@ -1201,7 +1201,7 @@ void test_vluxseg6ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_tumu( @@ -1222,7 +1222,7 @@ void test_vluxseg6ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_tumu( @@ -1243,7 +1243,7 @@ 
void test_vluxseg6ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_tumu( @@ -1264,7 +1264,7 @@ void test_vluxseg6ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_tumu( @@ -1285,7 +1285,7 @@ void test_vluxseg6ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t 
maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_tumu( @@ -1306,7 +1306,7 @@ void test_vluxseg6ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_tumu( @@ -1327,7 +1327,7 @@ void test_vluxseg6ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
bindex, vl); + return __riscv_vluxseg6ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_tumu( @@ -1348,7 +1348,7 @@ void test_vluxseg6ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vluxseg6ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_tumu( @@ -1390,7 +1390,7 @@ void 
test_vluxseg6ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_tumu( @@ -1411,7 +1411,7 @@ void test_vluxseg6ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_tumu( @@ -1432,7 +1432,7 @@ void test_vluxseg6ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, 
vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_tumu( @@ -1453,7 +1453,7 @@ void test_vluxseg6ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_tumu( @@ -1474,7 +1474,7 @@ void test_vluxseg6ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_tumu( @@ -1495,7 +1495,7 @@ void test_vluxseg6ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_tumu( @@ -1516,7 +1516,7 @@ void test_vluxseg6ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_tumu( @@ -1537,7 
+1537,7 @@ void test_vluxseg6ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_tumu( @@ -1558,7 +1558,7 @@ void test_vluxseg6ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vluxseg6ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t 
*v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_tumu( @@ -1600,7 +1600,7 @@ void test_vluxseg6ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_tumu( @@ -1621,7 +1621,7 @@ void test_vluxseg6ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return 
vluxseg6ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_tumu( @@ -1642,7 +1642,7 @@ void test_vluxseg6ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_mu( @@ -1663,7 +1663,7 @@ void test_vluxseg6ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_mu( @@ -1684,7 +1684,7 @@ void test_vluxseg6ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_mu( @@ -1705,7 +1705,7 @@ void test_vluxseg6ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_mu( @@ -1726,7 +1726,7 @@ void test_vluxseg6ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: 
ret void // void test_vluxseg6ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_mu( @@ -1747,7 +1747,7 @@ void test_vluxseg6ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_mu( @@ -1768,7 +1768,7 @@ void test_vluxseg6ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, 
vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_mu( @@ -1789,7 +1789,7 @@ void test_vluxseg6ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_mu( @@ -1810,7 +1810,7 @@ void test_vluxseg6ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return 
__riscv_vluxseg6ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_mu( @@ -1831,7 +1831,7 @@ void test_vluxseg6ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_mu( @@ -1852,7 +1852,7 @@ void test_vluxseg6ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_mu( @@ -1873,7 +1873,7 @@ void test_vluxseg6ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: 
ret void // void test_vluxseg6ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_mu( @@ -1894,7 +1894,7 @@ void test_vluxseg6ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_mu( @@ -1915,7 +1915,7 @@ void test_vluxseg6ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, 
vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_mu( @@ -1936,7 +1936,7 @@ void test_vluxseg6ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_mu( @@ -1957,7 +1957,7 @@ void test_vluxseg6ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_mu( @@ -1978,7 +1978,7 @@ void test_vluxseg6ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_mu( @@ -1999,7 +1999,7 @@ void test_vluxseg6ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_mu( @@ -2020,7 +2020,7 @@ void test_vluxseg6ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_mu( @@ -2041,7 +2041,7 @@ void test_vluxseg6ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_mu( @@ -2062,7 +2062,7 @@ void test_vluxseg6ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t 
*base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_mu( @@ -2083,7 +2083,7 @@ void test_vluxseg6ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_mu( @@ -2104,7 +2104,7 @@ void test_vluxseg6ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, 
v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_mu( @@ -2125,7 +2125,7 @@ void test_vluxseg6ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_mu( @@ -2146,7 +2146,7 @@ void test_vluxseg6ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_mu( @@ -2167,7 +2167,7 @@ void test_vluxseg6ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // 
CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_mu( @@ -2188,6 +2188,6 @@ void test_vluxseg6ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei32_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei64.c index 098409b101d0..2dd89129184c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei64.c @@ -25,7 +25,7 
@@ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_tu( @@ -46,7 +46,7 @@ void test_vluxseg6ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_tu( @@ -67,7 +67,7 @@ void test_vluxseg6ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t 
maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_tu( @@ -88,7 +88,7 @@ void test_vluxseg6ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_tu( @@ -109,7 +109,7 @@ void test_vluxseg6ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f32m1_tu(v0, v1, v2, v3, v4, v5, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_tu( @@ -130,7 +130,7 @@ void test_vluxseg6ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_tu( @@ -151,7 +151,7 @@ void test_vluxseg6ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_tu( @@ -172,7 +172,7 @@ void test_vluxseg6ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, 
vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_tu( @@ -193,7 +193,7 @@ void test_vluxseg6ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_tu( @@ -214,7 +214,7 @@ void test_vluxseg6ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
bindex, vl); + return __riscv_vluxseg6ei64_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_tu( @@ -235,7 +235,7 @@ void test_vluxseg6ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_tu( @@ -256,7 +256,7 @@ void test_vluxseg6ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_tu( @@ -277,7 +277,7 @@ void test_vluxseg6ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_tu( @@ -298,7 +298,7 @@ void test_vluxseg6ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_tu( @@ -319,7 +319,7 @@ void test_vluxseg6ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - 
return vluxseg6ei64_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_tu( @@ -340,7 +340,7 @@ void test_vluxseg6ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_tu( @@ -361,7 +361,7 @@ void test_vluxseg6ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_tu( @@ 
-382,7 +382,7 @@ void test_vluxseg6ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_tu( @@ -403,7 +403,7 @@ void test_vluxseg6ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_tu( @@ -424,7 +424,7 @@ void test_vluxseg6ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, 
vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_tu( @@ -445,7 +445,7 @@ void test_vluxseg6ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_tu( @@ -466,7 +466,7 @@ void test_vluxseg6ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_tu( @@ -487,7 +487,7 @@ void test_vluxseg6ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_tu( @@ -508,7 +508,7 @@ void test_vluxseg6ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_tu( @@ -529,7 +529,7 @@ void test_vluxseg6ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u32m1_tu(vuint32m1_t *v0, 
vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_tu( @@ -550,7 +550,7 @@ void test_vluxseg6ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_tum( @@ -571,7 +571,7 @@ void test_vluxseg6ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return 
vluxseg6ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_tum( @@ -592,7 +592,7 @@ void test_vluxseg6ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_tum( @@ -613,7 +613,7 @@ void test_vluxseg6ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vluxseg6ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_tum( @@ -655,7 +655,7 @@ void test_vluxseg6ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_tum( @@ -676,7 +676,7 @@ void test_vluxseg6ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret 
void // void test_vluxseg6ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_tum( @@ -697,7 +697,7 @@ void test_vluxseg6ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_tum( @@ -718,7 +718,7 @@ void test_vluxseg6ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t 
maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_tum( @@ -739,7 +739,7 @@ void test_vluxseg6ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_tum( @@ -760,7 +760,7 @@ void test_vluxseg6ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_tum( @@ -781,7 +781,7 @@ void test_vluxseg6ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_tum( @@ -802,7 +802,7 @@ void test_vluxseg6ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_tum( @@ -823,7 +823,7 @@ void test_vluxseg6ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vluxseg6ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_tum( @@ -865,7 +865,7 @@ void test_vluxseg6ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t 
*base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_tum( @@ -886,7 +886,7 @@ void test_vluxseg6ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_tum( @@ -907,7 +907,7 @@ void test_vluxseg6ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_tum( @@ -928,7 +928,7 @@ void test_vluxseg6ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vluxseg6ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_tum( @@ -970,7 +970,7 @@ void test_vluxseg6ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_tum( @@ -991,7 +991,7 @@ void test_vluxseg6ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_tum( @@ -1012,7 +1012,7 @@ void test_vluxseg6ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, 
vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_tum( @@ -1033,7 +1033,7 @@ void test_vluxseg6ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vluxseg6ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return 
__riscv_vluxseg6ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_tum( @@ -1075,7 +1075,7 @@ void test_vluxseg6ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_tum( @@ -1096,7 +1096,7 @@ void test_vluxseg6ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_tumu( @@ -1117,7 +1117,7 @@ void 
test_vluxseg6ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_tumu( @@ -1138,7 +1138,7 @@ void test_vluxseg6ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void test_vluxseg6ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, 
vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_tumu( @@ -1180,7 +1180,7 @@ void test_vluxseg6ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_tumu( @@ -1201,7 +1201,7 @@ void test_vluxseg6ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t 
bindex, size_t vl) { - return vluxseg6ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_tumu( @@ -1222,7 +1222,7 @@ void test_vluxseg6ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_tumu( @@ -1243,7 +1243,7 @@ void test_vluxseg6ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_tumu( @@ -1264,7 +1264,7 @@ void test_vluxseg6ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_tumu( @@ -1285,7 +1285,7 @@ void test_vluxseg6ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_tumu( @@ -1306,7 +1306,7 @@ void test_vluxseg6ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_tumu( @@ -1327,7 +1327,7 @@ void test_vluxseg6ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_tumu( @@ -1348,7 +1348,7 @@ void test_vluxseg6ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, 
const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vluxseg6ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_tumu( @@ -1390,7 +1390,7 @@ void test_vluxseg6ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_tumu( @@ -1411,7 +1411,7 @@ void test_vluxseg6ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_tumu( @@ -1432,7 +1432,7 @@ void test_vluxseg6ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_tumu( @@ -1453,7 +1453,7 @@ void test_vluxseg6ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_tumu( @@ -1474,7 +1474,7 @@ void test_vluxseg6ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_tumu( @@ -1495,7 +1495,7 @@ void test_vluxseg6ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, 
vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_tumu( @@ -1516,7 +1516,7 @@ void test_vluxseg6ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_tumu( @@ -1537,7 +1537,7 @@ void test_vluxseg6ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return 
__riscv_vluxseg6ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_tumu( @@ -1558,7 +1558,7 @@ void test_vluxseg6ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vluxseg6ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_tumu( @@ -1600,7 +1600,7 @@ void 
test_vluxseg6ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_tumu( @@ -1621,7 +1621,7 @@ void test_vluxseg6ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_tumu( @@ -1642,7 +1642,7 @@ void test_vluxseg6ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t 
mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_mu( @@ -1663,7 +1663,7 @@ void test_vluxseg6ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_mu( @@ -1684,7 +1684,7 @@ void test_vluxseg6ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return 
vluxseg6ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_mu( @@ -1705,7 +1705,7 @@ void test_vluxseg6ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_mu( @@ -1726,7 +1726,7 @@ void test_vluxseg6ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_mu( @@ -1747,7 +1747,7 @@ void test_vluxseg6ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_mu( @@ -1768,7 +1768,7 @@ void test_vluxseg6ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_mu( @@ -1789,7 +1789,7 @@ void test_vluxseg6ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_mu( @@ -1810,7 +1810,7 @@ void test_vluxseg6ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_mu( @@ -1831,7 +1831,7 @@ void test_vluxseg6ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t 
bindex, size_t vl) { - return vluxseg6ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_mu( @@ -1852,7 +1852,7 @@ void test_vluxseg6ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_mu( @@ -1873,7 +1873,7 @@ void test_vluxseg6ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_mu( @@ -1894,7 +1894,7 @@ void test_vluxseg6ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_mu( @@ -1915,7 +1915,7 @@ void test_vluxseg6ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_mu( @@ -1936,7 +1936,7 @@ void test_vluxseg6ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, 
vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_mu( @@ -1957,7 +1957,7 @@ void test_vluxseg6ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_mu( @@ -1978,7 +1978,7 @@ void test_vluxseg6ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return 
vluxseg6ei64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_mu( @@ -1999,7 +1999,7 @@ void test_vluxseg6ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_mu( @@ -2020,7 +2020,7 @@ void test_vluxseg6ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_mu( @@ -2041,7 +2041,7 @@ void test_vluxseg6ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_mu( @@ -2062,7 +2062,7 @@ void test_vluxseg6ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_mu( @@ -2083,7 +2083,7 @@ void test_vluxseg6ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, 
vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_mu( @@ -2104,7 +2104,7 @@ void test_vluxseg6ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_mu( @@ -2125,7 +2125,7 @@ void test_vluxseg6ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t 
vl) { - return vluxseg6ei64_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_mu( @@ -2146,7 +2146,7 @@ void test_vluxseg6ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_mu( @@ -2167,7 +2167,7 @@ void test_vluxseg6ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_mu( @@ -2188,6 +2188,6 @@ void test_vluxseg6ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei8.c index 8b04a9b4b286..0166b68763be 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg6ei8.c @@ -25,7 +25,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_tu( @@ -46,7 +46,7 @@ void test_vluxseg6ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_tu( @@ -67,7 +67,7 @@ void test_vluxseg6ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_tu( @@ -88,7 +88,7 @@ void test_vluxseg6ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, 
vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_tu( @@ -109,7 +109,7 @@ void test_vluxseg6ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_tu( @@ -130,7 +130,7 @@ void test_vluxseg6ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f64m1_tu(v0, v1, v2, v3, 
v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_tu( @@ -151,7 +151,7 @@ void test_vluxseg6ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_tu( @@ -172,7 +172,7 @@ void test_vluxseg6ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_tu( @@ -193,7 +193,7 @@ void test_vluxseg6ei8_v_i8mf4_tu(vint8mf4_t *v0, 
vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_tu( @@ -214,7 +214,7 @@ void test_vluxseg6ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_tu( @@ -235,7 +235,7 @@ void test_vluxseg6ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - 
return vluxseg6ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_tu( @@ -256,7 +256,7 @@ void test_vluxseg6ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_tu( @@ -277,7 +277,7 @@ void test_vluxseg6ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_tu( @@ -298,7 
+298,7 @@ void test_vluxseg6ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_tu( @@ -319,7 +319,7 @@ void test_vluxseg6ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_tu( @@ -340,7 +340,7 @@ void test_vluxseg6ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, 
vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_tu( @@ -361,7 +361,7 @@ void test_vluxseg6ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_tu( @@ -382,7 +382,7 @@ void test_vluxseg6ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_tu( @@ -403,7 +403,7 @@ void test_vluxseg6ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_tu( @@ -424,7 +424,7 @@ void test_vluxseg6ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_tu( @@ -445,7 +445,7 @@ void test_vluxseg6ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t 
maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_tu( @@ -466,7 +466,7 @@ void test_vluxseg6ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_tu( @@ -487,7 +487,7 @@ void test_vluxseg6ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, 
vl); + return __riscv_vluxseg6ei8_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_tu( @@ -508,7 +508,7 @@ void test_vluxseg6ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_tu( @@ -529,7 +529,7 @@ void test_vluxseg6ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_tu( @@ -550,7 +550,7 @@ void test_vluxseg6ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret 
void // void test_vluxseg6ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_tum( @@ -571,7 +571,7 @@ void test_vluxseg6ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_tum( @@ -592,7 +592,7 @@ void test_vluxseg6ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t 
maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_tum( @@ -613,7 +613,7 @@ void test_vluxseg6ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_tum( @@ -634,7 +634,7 @@ void test_vluxseg6ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + 
return __riscv_vluxseg6ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_tum( @@ -655,7 +655,7 @@ void test_vluxseg6ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_tum( @@ -676,7 +676,7 @@ void test_vluxseg6ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_tum( @@ -697,7 +697,7 @@ void 
test_vluxseg6ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_tum( @@ -718,7 +718,7 @@ void test_vluxseg6ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_tum( @@ -739,7 +739,7 @@ void test_vluxseg6ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t 
maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_tum( @@ -760,7 +760,7 @@ void test_vluxseg6ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_tum( @@ -781,7 +781,7 @@ void test_vluxseg6ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return 
__riscv_vluxseg6ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_tum( @@ -802,7 +802,7 @@ void test_vluxseg6ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_tum( @@ -823,7 +823,7 @@ void test_vluxseg6ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_tum( @@ -844,7 +844,7 @@ void test_vluxseg6ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_tum( @@ -865,7 +865,7 @@ void test_vluxseg6ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_tum( @@ -886,7 +886,7 @@ void test_vluxseg6ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t 
maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_tum( @@ -907,7 +907,7 @@ void test_vluxseg6ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_tum( @@ -928,7 +928,7 @@ void test_vluxseg6ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf4_tum(v0, v1, 
v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_tum( @@ -949,7 +949,7 @@ void test_vluxseg6ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_tum( @@ -970,7 +970,7 @@ void test_vluxseg6ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_tum( @@ -991,7 +991,7 @@ void test_vluxseg6ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void 
test_vluxseg6ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_tum( @@ -1012,7 +1012,7 @@ void test_vluxseg6ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_tum( @@ -1033,7 +1033,7 @@ void test_vluxseg6ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t 
maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_tum( @@ -1054,7 +1054,7 @@ void test_vluxseg6ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_tum( @@ -1075,7 +1075,7 @@ void test_vluxseg6ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return 
__riscv_vluxseg6ei8_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_tum( @@ -1096,7 +1096,7 @@ void test_vluxseg6ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_tumu( @@ -1117,7 +1117,7 @@ void test_vluxseg6ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_tumu( @@ -1138,7 +1138,7 @@ void 
test_vluxseg6ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_tumu( @@ -1159,7 +1159,7 @@ void test_vluxseg6ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_tumu( @@ -1180,7 +1180,7 @@ void test_vluxseg6ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, 
vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_tumu( @@ -1201,7 +1201,7 @@ void test_vluxseg6ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_tumu( @@ -1222,7 +1222,7 @@ void test_vluxseg6ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return 
vluxseg6ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_tumu( @@ -1243,7 +1243,7 @@ void test_vluxseg6ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_tumu( @@ -1264,7 +1264,7 @@ void test_vluxseg6ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_tumu( @@ -1285,7 +1285,7 @@ void test_vluxseg6ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_tumu( @@ -1306,7 +1306,7 @@ void test_vluxseg6ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_tumu( @@ -1327,7 +1327,7 @@ void test_vluxseg6ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, 
vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_tumu( @@ -1348,7 +1348,7 @@ void test_vluxseg6ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_tumu( @@ -1369,7 +1369,7 @@ void test_vluxseg6ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, 
v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_tumu( @@ -1390,7 +1390,7 @@ void test_vluxseg6ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_tumu( @@ -1411,7 +1411,7 @@ void test_vluxseg6ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg6ei8_v_i64m1_tumu( @@ -1432,7 +1432,7 @@ void test_vluxseg6ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_tumu( @@ -1453,7 +1453,7 @@ void test_vluxseg6ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_tumu( @@ -1474,7 +1474,7 @@ void test_vluxseg6ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, 
vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_tumu( @@ -1495,7 +1495,7 @@ void test_vluxseg6ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_tumu( @@ -1516,7 +1516,7 @@ void test_vluxseg6ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_tumu( @@ -1537,7 +1537,7 @@ void test_vluxseg6ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_tumu( @@ -1558,7 +1558,7 @@ void test_vluxseg6ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_tumu( @@ -1579,7 +1579,7 @@ void test_vluxseg6ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_tumu( @@ -1600,7 +1600,7 @@ void test_vluxseg6ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_tumu( @@ -1621,7 +1621,7 @@ void test_vluxseg6ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_tumu( @@ -1642,7 +1642,7 @@ void test_vluxseg6ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_mu( @@ -1663,7 +1663,7 @@ void test_vluxseg6ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, 
vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_mu( @@ -1684,7 +1684,7 @@ void test_vluxseg6ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_mu( @@ -1705,7 +1705,7 @@ void test_vluxseg6ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f16m1_mu(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_mu( @@ -1726,7 +1726,7 @@ void test_vluxseg6ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_mu( @@ -1747,7 +1747,7 @@ void test_vluxseg6ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_mu( @@ -1768,7 +1768,7 @@ void test_vluxseg6ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // 
CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_mu( @@ -1789,7 +1789,7 @@ void test_vluxseg6ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_mu( @@ -1810,7 +1810,7 @@ void test_vluxseg6ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t 
maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_mu( @@ -1831,7 +1831,7 @@ void test_vluxseg6ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_mu( @@ -1852,7 +1852,7 @@ void test_vluxseg6ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_mu( @@ -1873,7 +1873,7 @@ void test_vluxseg6ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_mu( @@ -1894,7 +1894,7 @@ void test_vluxseg6ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_mu( @@ -1915,7 +1915,7 @@ void test_vluxseg6ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t 
*v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_mu( @@ -1936,7 +1936,7 @@ void test_vluxseg6ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_mu( @@ -1957,7 +1957,7 @@ void test_vluxseg6ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return 
vluxseg6ei8_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_mu( @@ -1978,7 +1978,7 @@ void test_vluxseg6ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_mu( @@ -1999,7 +1999,7 @@ void test_vluxseg6ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_mu( @@ -2020,7 +2020,7 @@ void test_vluxseg6ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_mu( @@ -2041,7 +2041,7 @@ void test_vluxseg6ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_mu( @@ -2062,7 +2062,7 @@ void test_vluxseg6ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, 
vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_mu( @@ -2083,7 +2083,7 @@ void test_vluxseg6ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_mu( @@ -2104,7 +2104,7 @@ void test_vluxseg6ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u16mf2_mu(v0, v1, v2, 
v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_mu( @@ -2125,7 +2125,7 @@ void test_vluxseg6ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_mu( @@ -2146,7 +2146,7 @@ void test_vluxseg6ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_mu( @@ -2167,7 +2167,7 @@ void test_vluxseg6ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_mu( @@ -2188,6 +2188,6 @@ void test_vluxseg6ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg6ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); + return __riscv_vluxseg6ei8_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei16.c index 6f8457a06cce..ed454eaed26c 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei16.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_tu( @@ -50,7 +50,7 @@ void test_vluxseg7ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_tu( @@ -73,7 +73,7 @@ void 
test_vluxseg7ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_tu( @@ -96,7 +96,7 @@ void test_vluxseg7ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_tu( @@ -119,7 +119,7 @@ void test_vluxseg7ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f32m1_tu(vfloat32m1_t *v0, 
vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_tu( @@ -142,7 +142,7 @@ void test_vluxseg7ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_tu( @@ -165,7 +165,7 @@ void test_vluxseg7ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t 
maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_tu( @@ -188,7 +188,7 @@ void test_vluxseg7ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_tu( @@ -211,7 +211,7 @@ void test_vluxseg7ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_tu( @@ -234,7 +234,7 @@ void test_vluxseg7ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_tu( @@ -257,7 +257,7 @@ void test_vluxseg7ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_tu( @@ -280,7 +280,7 @@ void test_vluxseg7ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_tu( @@ -303,7 +303,7 @@ void test_vluxseg7ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_tu( @@ -326,7 +326,7 @@ void test_vluxseg7ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg7ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_tu( @@ -349,7 +349,7 @@ void test_vluxseg7ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_tu( @@ -372,7 +372,7 @@ void test_vluxseg7ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, 
vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_tu( @@ -395,7 +395,7 @@ void test_vluxseg7ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_tu( @@ -418,7 +418,7 @@ void test_vluxseg7ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, 
v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_tu( @@ -441,7 +441,7 @@ void test_vluxseg7ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_tu( @@ -464,7 +464,7 @@ void test_vluxseg7ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_tu( @@ -487,7 +487,7 @@ void test_vluxseg7ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_tu( @@ -510,7 +510,7 @@ void test_vluxseg7ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_tu( @@ -533,7 +533,7 @@ void 
test_vluxseg7ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_tu( @@ -556,7 +556,7 @@ void test_vluxseg7ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_tu( @@ -579,7 +579,7 @@ void test_vluxseg7ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_tu( @@ -602,7 +602,7 @@ void test_vluxseg7ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_tum( @@ -625,7 +625,7 @@ void test_vluxseg7ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, 
vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_tum( @@ -648,7 +648,7 @@ void test_vluxseg7ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_tum( @@ -671,7 +671,7 @@ void test_vluxseg7ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, 
vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_tum( @@ -694,7 +694,7 @@ void test_vluxseg7ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_tum( @@ -717,7 +717,7 @@ void test_vluxseg7ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return 
vluxseg7ei16_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_tum( @@ -740,7 +740,7 @@ void test_vluxseg7ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_tum( @@ -763,7 +763,7 @@ void test_vluxseg7ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_tum( @@ -786,7 +786,7 @@ void test_vluxseg7ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_tum( @@ -809,7 +809,7 @@ void test_vluxseg7ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_tum( @@ -832,7 +832,7 @@ void test_vluxseg7ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_tum( @@ -855,7 +855,7 @@ void test_vluxseg7ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_tum( @@ -878,7 +878,7 @@ void test_vluxseg7ei16_v_i16mf4_tum(vint16mf4_t 
*v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_tum( @@ -901,7 +901,7 @@ void test_vluxseg7ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_tum( @@ -924,7 +924,7 @@ void test_vluxseg7ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t 
*v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_tum( @@ -947,7 +947,7 @@ void test_vluxseg7ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_tum( @@ -970,7 +970,7 @@ void test_vluxseg7ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t 
maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_tum( @@ -993,7 +993,7 @@ void test_vluxseg7ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_tum( @@ -1016,7 +1016,7 @@ void test_vluxseg7ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, 
vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_tum( @@ -1039,7 +1039,7 @@ void test_vluxseg7ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_tum( @@ -1062,7 +1062,7 @@ void test_vluxseg7ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_tum( @@ -1085,7 +1085,7 @@ void test_vluxseg7ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_tum( @@ -1108,7 +1108,7 @@ void test_vluxseg7ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vluxseg7ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_tum( @@ -1131,7 +1131,7 @@ void test_vluxseg7ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_tum( @@ -1154,7 +1154,7 @@ void test_vluxseg7ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_tum( @@ -1177,7 +1177,7 @@ void test_vluxseg7ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_tum( @@ -1200,7 +1200,7 @@ void test_vluxseg7ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_tumu( @@ 
-1223,7 +1223,7 @@ void test_vluxseg7ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_tumu( @@ -1246,7 +1246,7 @@ void test_vluxseg7ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_tumu( @@ -1269,7 +1269,7 @@ void 
test_vluxseg7ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_tumu( @@ -1292,7 +1292,7 @@ void test_vluxseg7ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_tumu( @@ -1315,7 +1315,7 @@ void test_vluxseg7ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // 
CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_tumu( @@ -1338,7 +1338,7 @@ void test_vluxseg7ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_tumu( @@ -1361,7 +1361,7 @@ void test_vluxseg7ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, 
vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_tumu( @@ -1384,7 +1384,7 @@ void test_vluxseg7ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_tumu( @@ -1407,7 +1407,7 @@ void test_vluxseg7ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t 
maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_tumu( @@ -1430,7 +1430,7 @@ void test_vluxseg7ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_tumu( @@ -1453,7 +1453,7 @@ void test_vluxseg7ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - 
return vluxseg7ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_tumu( @@ -1476,7 +1476,7 @@ void test_vluxseg7ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_tumu( @@ -1499,7 +1499,7 @@ void test_vluxseg7ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_tumu( @@ -1522,7 +1522,7 @@ void test_vluxseg7ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_tumu( @@ -1545,7 +1545,7 @@ void test_vluxseg7ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_tumu( @@ -1568,7 +1568,7 @@ void test_vluxseg7ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_tumu( @@ -1591,7 +1591,7 @@ void test_vluxseg7ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg7ei16_v_u8mf4_tumu( @@ -1614,7 +1614,7 @@ void test_vluxseg7ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_tumu( @@ -1637,7 +1637,7 @@ void test_vluxseg7ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_tumu( @@ -1660,7 +1660,7 @@ void test_vluxseg7ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, 
vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_tumu( @@ -1683,7 +1683,7 @@ void test_vluxseg7ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_tumu( @@ -1706,7 +1706,7 @@ void test_vluxseg7ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, 
vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_tumu( @@ -1729,7 +1729,7 @@ void test_vluxseg7ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_tumu( @@ -1752,7 +1752,7 @@ void test_vluxseg7ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, 
vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_tumu( @@ -1775,7 +1775,7 @@ void test_vluxseg7ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_tumu( @@ -1798,7 +1798,7 @@ void test_vluxseg7ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t 
maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_mu( @@ -1821,7 +1821,7 @@ void test_vluxseg7ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_mu( @@ -1844,7 +1844,7 @@ void test_vluxseg7ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const 
_Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_mu( @@ -1867,7 +1867,7 @@ void test_vluxseg7ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_mu( @@ -1890,7 +1890,7 @@ void test_vluxseg7ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f32mf2_mu(v0, v1, v2, 
v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_mu( @@ -1913,7 +1913,7 @@ void test_vluxseg7ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_mu( @@ -1936,7 +1936,7 @@ void test_vluxseg7ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
bindex, vl); + return __riscv_vluxseg7ei16_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_mu( @@ -1959,7 +1959,7 @@ void test_vluxseg7ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_mu( @@ -1982,7 +1982,7 @@ void test_vluxseg7ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_mu( @@ -2005,7 +2005,7 @@ void test_vluxseg7ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_mu( @@ -2028,7 +2028,7 @@ void test_vluxseg7ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_mu( @@ -2051,7 +2051,7 @@ void test_vluxseg7ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // 
CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_mu( @@ -2074,7 +2074,7 @@ void test_vluxseg7ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_mu( @@ -2097,7 +2097,7 @@ void test_vluxseg7ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, 
vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_mu( @@ -2120,7 +2120,7 @@ void test_vluxseg7ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_mu( @@ -2143,7 +2143,7 @@ void test_vluxseg7ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t 
maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_mu( @@ -2166,7 +2166,7 @@ void test_vluxseg7ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_mu( @@ -2189,7 +2189,7 @@ void test_vluxseg7ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vluxseg7ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_mu( @@ -2212,7 +2212,7 @@ void test_vluxseg7ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_mu( @@ -2235,7 +2235,7 @@ void test_vluxseg7ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_mu( @@ -2258,7 +2258,7 @@ void test_vluxseg7ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_mu( @@ -2281,7 +2281,7 @@ void test_vluxseg7ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_mu( @@ -2304,7 +2304,7 @@ void test_vluxseg7ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_mu( @@ -2327,7 +2327,7 @@ void test_vluxseg7ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_mu( @@ -2350,7 
+2350,7 @@ void test_vluxseg7ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_mu( @@ -2373,7 +2373,7 @@ void test_vluxseg7ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_mu( @@ -2396,6 +2396,6 @@ void test_vluxseg7ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // 
void test_vluxseg7ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei16_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei32.c index c5f184c8d649..29344ae4b1e7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei32.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg7ei32_v_f16mf2_tu( @@ -50,7 +50,7 @@ void test_vluxseg7ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_tu( @@ -73,7 +73,7 @@ void test_vluxseg7ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_tu( @@ -96,7 +96,7 @@ void test_vluxseg7ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void 
// void test_vluxseg7ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_tu( @@ -119,7 +119,7 @@ void test_vluxseg7ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_tu( @@ -142,7 +142,7 @@ void test_vluxseg7ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t 
*v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_tu( @@ -165,7 +165,7 @@ void test_vluxseg7ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_tu( @@ -188,7 +188,7 @@ void test_vluxseg7ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) 
{ - return vluxseg7ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_tu( @@ -211,7 +211,7 @@ void test_vluxseg7ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_tu( @@ -234,7 +234,7 @@ void test_vluxseg7ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_tu( @@ -257,7 +257,7 @@ void test_vluxseg7ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_tu( @@ -280,7 +280,7 @@ void test_vluxseg7ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_tu( @@ -303,7 +303,7 @@ void 
test_vluxseg7ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_tu( @@ -326,7 +326,7 @@ void test_vluxseg7ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_tu( @@ -349,7 +349,7 @@ void test_vluxseg7ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, 
vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_tu( @@ -372,7 +372,7 @@ void test_vluxseg7ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_tu( @@ -395,7 +395,7 @@ void test_vluxseg7ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const 
uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_tu( @@ -418,7 +418,7 @@ void test_vluxseg7ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_tu( @@ -441,7 +441,7 @@ void test_vluxseg7ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_tu( @@ -464,7 +464,7 @@ void test_vluxseg7ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_tu( @@ -487,7 +487,7 @@ void test_vluxseg7ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_tu( @@ -510,7 +510,7 @@ void test_vluxseg7ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_tu( @@ -533,7 +533,7 @@ void test_vluxseg7ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_tu( @@ -556,7 +556,7 @@ void test_vluxseg7ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // 
void test_vluxseg7ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_tu( @@ -579,7 +579,7 @@ void test_vluxseg7ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_tu( @@ -602,7 +602,7 @@ void test_vluxseg7ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, 
vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_tum( @@ -625,7 +625,7 @@ void test_vluxseg7ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_tum( @@ -648,7 +648,7 @@ void test_vluxseg7ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t 
maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_tum( @@ -671,7 +671,7 @@ void test_vluxseg7ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_tum( @@ -694,7 +694,7 @@ void test_vluxseg7ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, 
vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_tum( @@ -717,7 +717,7 @@ void test_vluxseg7ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_tum( @@ -740,7 +740,7 @@ void test_vluxseg7ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_tum( @@ -763,7 +763,7 @@ void test_vluxseg7ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_tum( @@ -786,7 +786,7 @@ void test_vluxseg7ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_tum( @@ -809,7 +809,7 @@ void test_vluxseg7ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_tum( @@ -832,7 +832,7 @@ void test_vluxseg7ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_tum( @@ -855,7 +855,7 @@ 
void test_vluxseg7ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_tum( @@ -878,7 +878,7 @@ void test_vluxseg7ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_tum( @@ -901,7 +901,7 @@ void test_vluxseg7ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void 
test_vluxseg7ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_tum( @@ -924,7 +924,7 @@ void test_vluxseg7ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_tum( @@ -947,7 +947,7 @@ void test_vluxseg7ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, 
vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_tum( @@ -970,7 +970,7 @@ void test_vluxseg7ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_tum( @@ -993,7 +993,7 @@ void test_vluxseg7ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, 
vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_tum( @@ -1016,7 +1016,7 @@ void test_vluxseg7ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_tum( @@ -1039,7 +1039,7 @@ void test_vluxseg7ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, 
v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_tum( @@ -1062,7 +1062,7 @@ void test_vluxseg7ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_tum( @@ -1085,7 +1085,7 @@ void test_vluxseg7ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vluxseg7ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_tum( @@ -1108,7 +1108,7 @@ void test_vluxseg7ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_tum( @@ -1131,7 +1131,7 @@ void test_vluxseg7ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_tum( @@ -1154,7 +1154,7 @@ void test_vluxseg7ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_tum( @@ -1177,7 +1177,7 @@ void test_vluxseg7ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_tum( @@ 
-1200,7 +1200,7 @@ void test_vluxseg7ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_tumu( @@ -1223,7 +1223,7 @@ void test_vluxseg7ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_tumu( @@ -1246,7 +1246,7 @@ void test_vluxseg7ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, 
vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_tumu( @@ -1269,7 +1269,7 @@ void test_vluxseg7ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_tumu( @@ -1292,7 +1292,7 @@ void test_vluxseg7ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg7ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_tumu( @@ -1315,7 +1315,7 @@ void test_vluxseg7ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_tumu( @@ -1338,7 +1338,7 @@ void test_vluxseg7ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, 
vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_tumu( @@ -1361,7 +1361,7 @@ void test_vluxseg7ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_tumu( @@ -1384,7 +1384,7 @@ void test_vluxseg7ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, 
vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_tumu( @@ -1407,7 +1407,7 @@ void test_vluxseg7ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_tumu( @@ -1430,7 +1430,7 @@ void test_vluxseg7ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - 
return vluxseg7ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_tumu( @@ -1453,7 +1453,7 @@ void test_vluxseg7ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_tumu( @@ -1476,7 +1476,7 @@ void test_vluxseg7ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_tumu( @@ -1499,7 +1499,7 @@ void test_vluxseg7ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_tumu( @@ -1522,7 +1522,7 @@ void test_vluxseg7ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_tumu( @@ -1545,7 +1545,7 @@ void test_vluxseg7ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_tumu( @@ -1568,7 +1568,7 @@ void test_vluxseg7ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg7ei32_v_u8mf8_tumu( @@ -1591,7 +1591,7 @@ void test_vluxseg7ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_tumu( @@ -1614,7 +1614,7 @@ void test_vluxseg7ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_tumu( @@ -1637,7 +1637,7 @@ void test_vluxseg7ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, 
vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_tumu( @@ -1660,7 +1660,7 @@ void test_vluxseg7ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_tumu( @@ -1683,7 +1683,7 @@ void test_vluxseg7ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, 
vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_tumu( @@ -1706,7 +1706,7 @@ void test_vluxseg7ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_tumu( @@ -1729,7 +1729,7 @@ void test_vluxseg7ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, 
vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_tumu( @@ -1752,7 +1752,7 @@ void test_vluxseg7ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_tumu( @@ -1775,7 +1775,7 @@ void test_vluxseg7ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, 
vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_tumu( @@ -1798,7 +1798,7 @@ void test_vluxseg7ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_mu( @@ -1821,7 +1821,7 @@ void test_vluxseg7ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, 
vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_mu( @@ -1844,7 +1844,7 @@ void test_vluxseg7ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_mu( @@ -1867,7 +1867,7 @@ void test_vluxseg7ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16m1_mu(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_mu( @@ -1890,7 +1890,7 @@ void test_vluxseg7ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_mu( @@ -1913,7 +1913,7 @@ void test_vluxseg7ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
bindex, vl); + return __riscv_vluxseg7ei32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_mu( @@ -1936,7 +1936,7 @@ void test_vluxseg7ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_mu( @@ -1959,7 +1959,7 @@ void test_vluxseg7ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_mu( @@ -1982,7 +1982,7 @@ void test_vluxseg7ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_mu( @@ -2005,7 +2005,7 @@ void test_vluxseg7ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_mu( @@ -2028,7 +2028,7 @@ void 
test_vluxseg7ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_mu( @@ -2051,7 +2051,7 @@ void test_vluxseg7ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_mu( @@ -2074,7 +2074,7 @@ void test_vluxseg7ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16mf2_mu(vint16mf2_t *v0, 
vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_mu( @@ -2097,7 +2097,7 @@ void test_vluxseg7ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_mu( @@ -2120,7 +2120,7 @@ void test_vluxseg7ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t 
maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_mu( @@ -2143,7 +2143,7 @@ void test_vluxseg7ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_mu( @@ -2166,7 +2166,7 @@ void test_vluxseg7ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t 
bindex, size_t vl) { - return vluxseg7ei32_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_mu( @@ -2189,7 +2189,7 @@ void test_vluxseg7ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_mu( @@ -2212,7 +2212,7 @@ void test_vluxseg7ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_mu( @@ -2235,7 +2235,7 @@ void test_vluxseg7ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_mu( @@ -2258,7 +2258,7 @@ void test_vluxseg7ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_mu( @@ -2281,7 +2281,7 @@ void test_vluxseg7ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_mu( @@ -2304,7 +2304,7 @@ void test_vluxseg7ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg7ei32_v_u16m1_mu( @@ -2327,7 +2327,7 @@ void test_vluxseg7ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_mu( @@ -2350,7 +2350,7 @@ void test_vluxseg7ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_mu( @@ -2373,7 +2373,7 @@ void test_vluxseg7ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, 
vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_mu( @@ -2396,6 +2396,6 @@ void test_vluxseg7ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei32_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei64.c index 80059349ff06..ba3bbf2efecb 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei64.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_tu( @@ -50,7 +50,7 @@ void test_vluxseg7ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_tu( @@ -73,7 +73,7 @@ void 
test_vluxseg7ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_tu( @@ -96,7 +96,7 @@ void test_vluxseg7ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_tu( @@ -119,7 +119,7 @@ void test_vluxseg7ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f32m1_tu(vfloat32m1_t *v0, 
vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_tu( @@ -142,7 +142,7 @@ void test_vluxseg7ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_tu( @@ -165,7 +165,7 @@ void test_vluxseg7ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t 
maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_tu( @@ -188,7 +188,7 @@ void test_vluxseg7ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_tu( @@ -211,7 +211,7 @@ void test_vluxseg7ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_tu( @@ -234,7 +234,7 @@ void test_vluxseg7ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_tu( @@ -257,7 +257,7 @@ void test_vluxseg7ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_tu( @@ -280,7 +280,7 @@ void test_vluxseg7ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_tu( @@ -303,7 +303,7 @@ void test_vluxseg7ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_tu( @@ -326,7 +326,7 @@ void test_vluxseg7ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg7ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_tu( @@ -349,7 +349,7 @@ void test_vluxseg7ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_tu( @@ -372,7 +372,7 @@ void test_vluxseg7ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, 
vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_tu( @@ -395,7 +395,7 @@ void test_vluxseg7ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_tu( @@ -418,7 +418,7 @@ void test_vluxseg7ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, 
v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_tu( @@ -441,7 +441,7 @@ void test_vluxseg7ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_tu( @@ -464,7 +464,7 @@ void test_vluxseg7ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_tu( @@ -487,7 +487,7 @@ void test_vluxseg7ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_tu( @@ -510,7 +510,7 @@ void test_vluxseg7ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_tu( @@ -533,7 +533,7 @@ void 
test_vluxseg7ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_tu( @@ -556,7 +556,7 @@ void test_vluxseg7ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_tu( @@ -579,7 +579,7 @@ void test_vluxseg7ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_tu( @@ -602,7 +602,7 @@ void test_vluxseg7ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_tum( @@ -625,7 +625,7 @@ void test_vluxseg7ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, 
vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_tum( @@ -648,7 +648,7 @@ void test_vluxseg7ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_tum( @@ -671,7 +671,7 @@ void test_vluxseg7ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, 
vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_tum( @@ -694,7 +694,7 @@ void test_vluxseg7ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_tum( @@ -717,7 +717,7 @@ void test_vluxseg7ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return 
vluxseg7ei64_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_tum( @@ -740,7 +740,7 @@ void test_vluxseg7ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_tum( @@ -763,7 +763,7 @@ void test_vluxseg7ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_tum( @@ -786,7 +786,7 @@ void test_vluxseg7ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_tum( @@ -809,7 +809,7 @@ void test_vluxseg7ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_tum( @@ -832,7 +832,7 @@ void test_vluxseg7ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_tum( @@ -855,7 +855,7 @@ void test_vluxseg7ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_tum( @@ -878,7 +878,7 @@ void test_vluxseg7ei64_v_i16mf4_tum(vint16mf4_t 
*v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_tum( @@ -901,7 +901,7 @@ void test_vluxseg7ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_tum( @@ -924,7 +924,7 @@ void test_vluxseg7ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t 
*v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_tum( @@ -947,7 +947,7 @@ void test_vluxseg7ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_tum( @@ -970,7 +970,7 @@ void test_vluxseg7ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, 
vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_tum( @@ -993,7 +993,7 @@ void test_vluxseg7ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_tum( @@ -1016,7 +1016,7 @@ void test_vluxseg7ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, 
size_t vl) { - return vluxseg7ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_tum( @@ -1039,7 +1039,7 @@ void test_vluxseg7ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_tum( @@ -1062,7 +1062,7 @@ void test_vluxseg7ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_tum( @@ -1085,7 +1085,7 @@ void test_vluxseg7ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_tum( @@ -1108,7 +1108,7 @@ void test_vluxseg7ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_tum( @@ -1131,7 +1131,7 @@ void test_vluxseg7ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_tum( @@ -1154,7 +1154,7 @@ void test_vluxseg7ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_tum( @@ -1177,7 +1177,7 @@ void test_vluxseg7ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_tum( @@ -1200,7 +1200,7 @@ void test_vluxseg7ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_tumu( @@ -1223,7 +1223,7 @@ void test_vluxseg7ei64_v_u64m1_tum(vuint64m1_t *v0, 
vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_tumu( @@ -1246,7 +1246,7 @@ void test_vluxseg7ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_tumu( @@ -1269,7 +1269,7 @@ void test_vluxseg7ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // 
void test_vluxseg7ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_tumu( @@ -1292,7 +1292,7 @@ void test_vluxseg7ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_tumu( @@ -1315,7 +1315,7 @@ void test_vluxseg7ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, 
vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_tumu( @@ -1338,7 +1338,7 @@ void test_vluxseg7ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_tumu( @@ -1361,7 +1361,7 @@ void test_vluxseg7ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t 
maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_tumu( @@ -1384,7 +1384,7 @@ void test_vluxseg7ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_tumu( @@ -1407,7 +1407,7 @@ void test_vluxseg7ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const 
int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_tumu( @@ -1430,7 +1430,7 @@ void test_vluxseg7ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_tumu( @@ -1453,7 +1453,7 @@ void test_vluxseg7ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_tumu( @@ -1476,7 +1476,7 @@ void test_vluxseg7ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_tumu( @@ -1499,7 +1499,7 @@ void test_vluxseg7ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_tumu( @@ -1522,7 +1522,7 @@ void test_vluxseg7ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_tumu( @@ -1545,7 +1545,7 @@ void test_vluxseg7ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg7ei64_v_i64m1_tumu( @@ -1568,7 +1568,7 @@ void test_vluxseg7ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_tumu( @@ -1591,7 +1591,7 @@ void test_vluxseg7ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_tumu( @@ -1614,7 +1614,7 @@ void test_vluxseg7ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // 
CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_tumu( @@ -1637,7 +1637,7 @@ void test_vluxseg7ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_tumu( @@ -1660,7 +1660,7 @@ void test_vluxseg7ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, 
vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_tumu( @@ -1683,7 +1683,7 @@ void test_vluxseg7ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_tumu( @@ -1706,7 +1706,7 @@ void test_vluxseg7ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t 
maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_tumu( @@ -1729,7 +1729,7 @@ void test_vluxseg7ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_tumu( @@ -1752,7 +1752,7 @@ void test_vluxseg7ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t 
maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_tumu( @@ -1775,7 +1775,7 @@ void test_vluxseg7ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_tumu( @@ -1798,7 +1798,7 @@ void test_vluxseg7ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return 
vluxseg7ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_mu( @@ -1821,7 +1821,7 @@ void test_vluxseg7ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_mu( @@ -1844,7 +1844,7 @@ void test_vluxseg7ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_mu( @@ -1867,7 +1867,7 @@ void test_vluxseg7ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_mu( @@ -1890,7 +1890,7 @@ void test_vluxseg7ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + 
return __riscv_vluxseg7ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_mu( @@ -1913,7 +1913,7 @@ void test_vluxseg7ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_mu( @@ -1936,7 +1936,7 @@ void test_vluxseg7ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_mu( @@ -1959,7 +1959,7 @@ void test_vluxseg7ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_mu( @@ -1982,7 +1982,7 @@ void test_vluxseg7ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_mu( @@ -2005,7 +2005,7 @@ void 
test_vluxseg7ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_mu( @@ -2028,7 +2028,7 @@ void test_vluxseg7ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_mu( @@ -2051,7 +2051,7 @@ void test_vluxseg7ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t 
*v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_mu( @@ -2074,7 +2074,7 @@ void test_vluxseg7ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_mu( @@ -2097,7 +2097,7 @@ void test_vluxseg7ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, 
vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_mu( @@ -2120,7 +2120,7 @@ void test_vluxseg7ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_mu( @@ -2143,7 +2143,7 @@ void test_vluxseg7ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t 
bindex, size_t vl) { - return vluxseg7ei64_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_mu( @@ -2166,7 +2166,7 @@ void test_vluxseg7ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_mu( @@ -2189,7 +2189,7 @@ void test_vluxseg7ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_mu( @@ -2212,7 +2212,7 @@ void test_vluxseg7ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_mu( @@ -2235,7 +2235,7 @@ void test_vluxseg7ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_mu( @@ -2258,7 +2258,7 @@ void test_vluxseg7ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_mu( @@ -2281,7 +2281,7 @@ void test_vluxseg7ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_mu( @@ -2304,7 
+2304,7 @@ void test_vluxseg7ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_mu( @@ -2327,7 +2327,7 @@ void test_vluxseg7ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_mu( @@ -2350,7 +2350,7 @@ void test_vluxseg7ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // 
void test_vluxseg7ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_mu( @@ -2373,7 +2373,7 @@ void test_vluxseg7ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_mu( @@ -2396,6 +2396,6 @@ void test_vluxseg7ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, 
vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei8.c index a774b0b96f62..3e63ac9164c9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg7ei8.c @@ -27,7 +27,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_tu( @@ -50,7 +50,7 @@ void test_vluxseg7ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // 
CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_tu( @@ -73,7 +73,7 @@ void test_vluxseg7ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_tu( @@ -96,7 +96,7 @@ void test_vluxseg7ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, 
vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_tu( @@ -119,7 +119,7 @@ void test_vluxseg7ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_tu( @@ -142,7 +142,7 @@ void test_vluxseg7ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t 
maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_tu( @@ -165,7 +165,7 @@ void test_vluxseg7ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_tu( @@ -188,7 +188,7 @@ void test_vluxseg7ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_tu( @@ -211,7 +211,7 @@ void test_vluxseg7ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_tu( @@ -234,7 +234,7 @@ void test_vluxseg7ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_tu( @@ -257,7 +257,7 @@ 
void test_vluxseg7ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_tu( @@ -280,7 +280,7 @@ void test_vluxseg7ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_tu( @@ -303,7 +303,7 @@ void test_vluxseg7ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t 
*v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_tu( @@ -326,7 +326,7 @@ void test_vluxseg7ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_tu( @@ -349,7 +349,7 @@ void test_vluxseg7ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const 
int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_tu( @@ -372,7 +372,7 @@ void test_vluxseg7ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_tu( @@ -395,7 +395,7 @@ void test_vluxseg7ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vluxseg7ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_tu( @@ -418,7 +418,7 @@ void test_vluxseg7ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_tu( @@ -441,7 +441,7 @@ void test_vluxseg7ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg7ei8_v_u8m1_tu( @@ -464,7 +464,7 @@ void test_vluxseg7ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_tu( @@ -487,7 +487,7 @@ void test_vluxseg7ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_tu( @@ -510,7 +510,7 @@ void test_vluxseg7ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16mf2_tu(vuint16mf2_t 
*v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_tu( @@ -533,7 +533,7 @@ void test_vluxseg7ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_tu( @@ -556,7 +556,7 @@ void test_vluxseg7ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, 
vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_tu( @@ -579,7 +579,7 @@ void test_vluxseg7ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_tu( @@ -602,7 +602,7 @@ void test_vluxseg7ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u64m1_tu(v0, v1, v2, v3, v4, 
v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_tum( @@ -625,7 +625,7 @@ void test_vluxseg7ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_tum( @@ -648,7 +648,7 @@ void test_vluxseg7ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_tum( @@ -671,7 +671,7 @@ void test_vluxseg7ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_tum( @@ -694,7 +694,7 @@ void test_vluxseg7ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_tum( @@ -717,7 +717,7 @@ void test_vluxseg7ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_tum( @@ -740,7 +740,7 @@ void test_vluxseg7ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_tum( @@ -763,7 +763,7 @@ void test_vluxseg7ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_tum( @@ -786,7 +786,7 @@ void test_vluxseg7ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_tum( @@ -809,7 +809,7 @@ void test_vluxseg7ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret 
void // void test_vluxseg7ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_tum( @@ -832,7 +832,7 @@ void test_vluxseg7ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_tum( @@ -855,7 +855,7 @@ void test_vluxseg7ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t 
maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_tum( @@ -878,7 +878,7 @@ void test_vluxseg7ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_tum( @@ -901,7 +901,7 @@ void test_vluxseg7ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, 
const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_tum( @@ -924,7 +924,7 @@ void test_vluxseg7ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_tum( @@ -947,7 +947,7 @@ void test_vluxseg7ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_tum( @@ -970,7 +970,7 @@ void test_vluxseg7ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_tum( @@ -993,7 +993,7 @@ void test_vluxseg7ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_tum( @@ -1016,7 +1016,7 @@ void test_vluxseg7ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_tum( @@ -1039,7 +1039,7 @@ void test_vluxseg7ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_tum( 
@@ -1062,7 +1062,7 @@ void test_vluxseg7ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_tum( @@ -1085,7 +1085,7 @@ void test_vluxseg7ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_tum( @@ -1108,7 +1108,7 @@ void test_vluxseg7ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void 
test_vluxseg7ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_tum( @@ -1131,7 +1131,7 @@ void test_vluxseg7ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_tum( @@ -1154,7 +1154,7 @@ void test_vluxseg7ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, 
vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_tum( @@ -1177,7 +1177,7 @@ void test_vluxseg7ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_tum( @@ -1200,7 +1200,7 @@ void test_vluxseg7ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, 
vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_tumu( @@ -1223,7 +1223,7 @@ void test_vluxseg7ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_tumu( @@ -1246,7 +1246,7 @@ void test_vluxseg7ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, 
vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_tumu( @@ -1269,7 +1269,7 @@ void test_vluxseg7ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_tumu( @@ -1292,7 +1292,7 @@ void test_vluxseg7ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return 
vluxseg7ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_tumu( @@ -1315,7 +1315,7 @@ void test_vluxseg7ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_tumu( @@ -1338,7 +1338,7 @@ void test_vluxseg7ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_tumu( @@ -1361,7 +1361,7 @@ void test_vluxseg7ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_tumu( @@ -1384,7 +1384,7 @@ void test_vluxseg7ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_tumu( @@ -1407,7 +1407,7 @@ void test_vluxseg7ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_tumu( @@ -1430,7 +1430,7 @@ void test_vluxseg7ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_tumu( @@ -1453,7 +1453,7 @@ void 
test_vluxseg7ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_tumu( @@ -1476,7 +1476,7 @@ void test_vluxseg7ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_tumu( @@ -1499,7 +1499,7 @@ void test_vluxseg7ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void 
test_vluxseg7ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_tumu( @@ -1522,7 +1522,7 @@ void test_vluxseg7ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_tumu( @@ -1545,7 +1545,7 @@ void test_vluxseg7ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, 
vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_tumu( @@ -1568,7 +1568,7 @@ void test_vluxseg7ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_tumu( @@ -1591,7 +1591,7 @@ void test_vluxseg7ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t 
maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_tumu( @@ -1614,7 +1614,7 @@ void test_vluxseg7ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_tumu( @@ -1637,7 +1637,7 @@ void test_vluxseg7ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf2_tumu(v0, v1, v2, 
v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_tumu( @@ -1660,7 +1660,7 @@ void test_vluxseg7ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_tumu( @@ -1683,7 +1683,7 @@ void test_vluxseg7ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return 
__riscv_vluxseg7ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_tumu( @@ -1706,7 +1706,7 @@ void test_vluxseg7ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_tumu( @@ -1729,7 +1729,7 @@ void test_vluxseg7ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_tumu( @@ -1752,7 +1752,7 @@ void test_vluxseg7ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_tumu( @@ -1775,7 +1775,7 @@ void test_vluxseg7ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_tumu( @@ 
-1798,7 +1798,7 @@ void test_vluxseg7ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_mu( @@ -1821,7 +1821,7 @@ void test_vluxseg7ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_mu( @@ -1844,7 +1844,7 @@ void test_vluxseg7ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // 
CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_mu( @@ -1867,7 +1867,7 @@ void test_vluxseg7ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_mu( @@ -1890,7 +1890,7 @@ void test_vluxseg7ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, 
vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_mu( @@ -1913,7 +1913,7 @@ void test_vluxseg7ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_mu( @@ -1936,7 +1936,7 @@ void test_vluxseg7ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, 
vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_mu( @@ -1959,7 +1959,7 @@ void test_vluxseg7ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_mu( @@ -1982,7 +1982,7 @@ void test_vluxseg7ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const 
int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_mu( @@ -2005,7 +2005,7 @@ void test_vluxseg7ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_mu( @@ -2028,7 +2028,7 @@ void test_vluxseg7ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
base, bindex, vl); + return __riscv_vluxseg7ei8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_mu( @@ -2051,7 +2051,7 @@ void test_vluxseg7ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_mu( @@ -2074,7 +2074,7 @@ void test_vluxseg7ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_mu( @@ -2097,7 +2097,7 @@ void test_vluxseg7ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_mu( @@ -2120,7 +2120,7 @@ void test_vluxseg7ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_mu( @@ -2143,7 +2143,7 @@ void 
test_vluxseg7ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_mu( @@ -2166,7 +2166,7 @@ void test_vluxseg7ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_mu( @@ -2189,7 +2189,7 @@ void test_vluxseg7ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, 
vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_mu( @@ -2212,7 +2212,7 @@ void test_vluxseg7ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_mu( @@ -2235,7 +2235,7 @@ void test_vluxseg7ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, 
vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_mu( @@ -2258,7 +2258,7 @@ void test_vluxseg7ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_mu( @@ -2281,7 +2281,7 @@ void test_vluxseg7ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t 
bindex, size_t vl) { - return vluxseg7ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_mu( @@ -2304,7 +2304,7 @@ void test_vluxseg7ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_mu( @@ -2327,7 +2327,7 @@ void test_vluxseg7ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_mu( @@ -2350,7 +2350,7 @@ void test_vluxseg7ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_mu( @@ -2373,7 +2373,7 @@ void test_vluxseg7ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u32m1_mu(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_mu( @@ -2396,6 +2396,6 @@ void test_vluxseg7ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg7ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); + return __riscv_vluxseg7ei8_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei16.c index 02a521f53a0d..37a82d10568f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei16.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return 
vluxseg8ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_tu( @@ -54,7 +54,7 @@ void test_vluxseg8ei16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_tu( @@ -79,7 +79,7 @@ void test_vluxseg8ei16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return 
vluxseg8ei16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_tu( @@ -104,7 +104,7 @@ void test_vluxseg8ei16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_tu( @@ -129,7 +129,7 @@ void test_vluxseg8ei16_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return 
vluxseg8ei16_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_tu( @@ -154,7 +154,7 @@ void test_vluxseg8ei16_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_tu( @@ -179,7 +179,7 @@ void test_vluxseg8ei16_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_tu( @@ -204,7 +204,7 @@ void test_vluxseg8ei16_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_tu( @@ -229,7 +229,7 @@ void test_vluxseg8ei16_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_tu( @@ -254,7 +254,7 @@ void test_vluxseg8ei16_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_tu( @@ -279,7 +279,7 @@ void test_vluxseg8ei16_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16mf4_tu(v0, v1, v2, v3, 
v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_tu( @@ -304,7 +304,7 @@ void test_vluxseg8ei16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_tu( @@ -329,7 +329,7 @@ void test_vluxseg8ei16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_tu( @@ -354,7 +354,7 @@ void test_vluxseg8ei16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_tu( @@ -379,7 +379,7 @@ void test_vluxseg8ei16_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); 
} // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_tu( @@ -404,7 +404,7 @@ void test_vluxseg8ei16_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_tu( @@ -429,7 +429,7 @@ void test_vluxseg8ei16_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_tu( @@ -454,7 +454,7 
@@ void test_vluxseg8ei16_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_tu( @@ -479,7 +479,7 @@ void test_vluxseg8ei16_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_tu( @@ -504,7 +504,7 @@ void test_vluxseg8ei16_v_u8mf2_tu(vuint8mf2_t *v0, 
vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_tu( @@ -529,7 +529,7 @@ void test_vluxseg8ei16_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_tu( @@ -554,7 +554,7 @@ void test_vluxseg8ei16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret 
void // void test_vluxseg8ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_tu( @@ -579,7 +579,7 @@ void test_vluxseg8ei16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_tu( @@ -604,7 +604,7 @@ void test_vluxseg8ei16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_tu( @@ -629,7 +629,7 @@ void test_vluxseg8ei16_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_tu( @@ -654,7 +654,7 @@ void test_vluxseg8ei16_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_tum( @@ -679,7 +679,7 @@ void test_vluxseg8ei16_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_tum( @@ -704,7 +704,7 @@ void test_vluxseg8ei16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: 
ret void // void test_vluxseg8ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_tum( @@ -729,7 +729,7 @@ void test_vluxseg8ei16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_tum( @@ -754,7 +754,7 @@ void 
test_vluxseg8ei16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_tum( @@ -779,7 +779,7 @@ void test_vluxseg8ei16_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei16_v_f64m1_tum( @@ -804,7 +804,7 @@ void test_vluxseg8ei16_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_tum( @@ -829,7 +829,7 @@ void test_vluxseg8ei16_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_tum( @@ -854,7 +854,7 @@ void test_vluxseg8ei16_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_tum( @@ -879,7 +879,7 @@ void test_vluxseg8ei16_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei16_v_i8m1_tum( @@ -904,7 +904,7 @@ void test_vluxseg8ei16_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_tum( @@ -929,7 +929,7 @@ void test_vluxseg8ei16_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei16_v_i16mf2_tum( @@ -954,7 +954,7 @@ void test_vluxseg8ei16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_tum( @@ -979,7 +979,7 @@ void test_vluxseg8ei16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei16_v_i32mf2_tum( @@ -1004,7 +1004,7 @@ void test_vluxseg8ei16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_tum( @@ -1029,7 +1029,7 @@ void test_vluxseg8ei16_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei16_v_i64m1_tum( @@ -1054,7 +1054,7 @@ void test_vluxseg8ei16_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_tum( @@ -1079,7 +1079,7 @@ void test_vluxseg8ei16_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei16_v_u8mf4_tum( @@ -1104,7 +1104,7 @@ void test_vluxseg8ei16_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_tum( @@ -1129,7 +1129,7 @@ void test_vluxseg8ei16_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_tum( @@ -1154,7 +1154,7 @@ void test_vluxseg8ei16_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_tum( @@ -1179,7 +1179,7 @@ void test_vluxseg8ei16_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_tum( @@ -1204,7 +1204,7 @@ void test_vluxseg8ei16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_tum( @@ -1229,7 +1229,7 @@ void test_vluxseg8ei16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_tum( @@ -1254,7 +1254,7 @@ void test_vluxseg8ei16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_tum( @@ -1279,7 +1279,7 @@ void test_vluxseg8ei16_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_tum( @@ -1304,7 +1304,7 @@ void test_vluxseg8ei16_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_tumu( @@ -1329,7 +1329,7 @@ void test_vluxseg8ei16_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16mf4_tumu(v0, v1, 
v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vluxseg8ei16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_tumu( @@ -1379,7 +1379,7 @@ void test_vluxseg8ei16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_tumu( @@ -1404,7 +1404,7 @@ void test_vluxseg8ei16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_tumu( @@ -1429,7 +1429,7 @@ void test_vluxseg8ei16_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_tumu( @@ -1454,7 +1454,7 @@ void test_vluxseg8ei16_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_tumu( @@ -1479,7 +1479,7 @@ void test_vluxseg8ei16_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_tumu( @@ -1504,7 +1504,7 @@ void test_vluxseg8ei16_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_tumu( @@ -1529,7 +1529,7 @@ void test_vluxseg8ei16_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_tumu( @@ -1554,7 +1554,7 @@ void test_vluxseg8ei16_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_tumu( @@ -1579,7 +1579,7 @@ void test_vluxseg8ei16_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_tumu( @@ -1604,7 +1604,7 @@ void test_vluxseg8ei16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_tumu( @@ -1629,7 +1629,7 @@ void test_vluxseg8ei16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_tumu( @@ -1654,7 +1654,7 @@ void test_vluxseg8ei16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_tumu( @@ -1679,7 +1679,7 @@ void test_vluxseg8ei16_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, 
v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_tumu( @@ -1704,7 +1704,7 @@ void test_vluxseg8ei16_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_tumu( @@ -1729,7 +1729,7 @@ void test_vluxseg8ei16_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, 
v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_tumu( @@ -1754,7 +1754,7 @@ void test_vluxseg8ei16_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_tumu( @@ -1779,7 +1779,7 @@ void test_vluxseg8ei16_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf2_tumu(v0, 
v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_tumu( @@ -1804,7 +1804,7 @@ void test_vluxseg8ei16_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_tumu( @@ -1829,7 +1829,7 @@ void test_vluxseg8ei16_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vluxseg8ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_tumu( @@ -1854,7 +1854,7 @@ void test_vluxseg8ei16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_tumu( @@ -1879,7 +1879,7 @@ void test_vluxseg8ei16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t 
*base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_tumu( @@ -1904,7 +1904,7 @@ void test_vluxseg8ei16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_tumu( @@ -1929,7 +1929,7 @@ void test_vluxseg8ei16_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t 
maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_tumu( @@ -1954,7 +1954,7 @@ void test_vluxseg8ei16_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_mu( @@ -1979,7 +1979,7 @@ void test_vluxseg8ei16_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t 
maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_mu( @@ -2004,7 +2004,7 @@ void test_vluxseg8ei16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_mu( @@ -2029,7 +2029,7 @@ void test_vluxseg8ei16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, 
vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_mu( @@ -2054,7 +2054,7 @@ void test_vluxseg8ei16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_mu( @@ -2079,7 +2079,7 @@ void test_vluxseg8ei16_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t 
mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_mu( @@ -2104,7 +2104,7 @@ void test_vluxseg8ei16_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_mu( @@ -2129,7 +2129,7 @@ void test_vluxseg8ei16_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, 
vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_mu( @@ -2154,7 +2154,7 @@ void test_vluxseg8ei16_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_mu( @@ -2179,7 +2179,7 @@ void test_vluxseg8ei16_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t 
mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_mu( @@ -2204,7 +2204,7 @@ void test_vluxseg8ei16_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_mu( @@ -2229,7 +2229,7 @@ void test_vluxseg8ei16_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t 
maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_mu( @@ -2254,7 +2254,7 @@ void test_vluxseg8ei16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_mu( @@ -2279,7 +2279,7 @@ void test_vluxseg8ei16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t 
maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_mu( @@ -2304,7 +2304,7 @@ void test_vluxseg8ei16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_mu( @@ -2329,7 +2329,7 @@ void test_vluxseg8ei16_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t 
maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_mu( @@ -2354,7 +2354,7 @@ void test_vluxseg8ei16_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_mu( @@ -2379,7 +2379,7 @@ void test_vluxseg8ei16_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, 
vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_mu( @@ -2404,7 +2404,7 @@ void test_vluxseg8ei16_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_mu( @@ -2429,7 +2429,7 @@ void test_vluxseg8ei16_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t 
maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_mu( @@ -2454,7 +2454,7 @@ void test_vluxseg8ei16_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_mu( @@ -2479,7 +2479,7 @@ void test_vluxseg8ei16_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t 
maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_mu( @@ -2504,7 +2504,7 @@ void test_vluxseg8ei16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_mu( @@ -2529,7 +2529,7 @@ void test_vluxseg8ei16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, 
vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_mu( @@ -2554,7 +2554,7 @@ void test_vluxseg8ei16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_mu( @@ -2579,7 +2579,7 @@ void test_vluxseg8ei16_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, 
vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_mu( @@ -2604,6 +2604,6 @@ void test_vluxseg8ei16_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei16_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei16_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei32.c index c962a0ce4b8d..e436410b981e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei32.c 
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei32.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_tu( @@ -54,7 +54,7 @@ void test_vluxseg8ei32_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei32_v_f16m1_tu( @@ -79,7 +79,7 @@ void test_vluxseg8ei32_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_tu( @@ -104,7 +104,7 @@ void test_vluxseg8ei32_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei32_v_f32m1_tu( @@ -129,7 +129,7 @@ void test_vluxseg8ei32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_tu( @@ -154,7 +154,7 @@ void test_vluxseg8ei32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei32_v_i8mf8_tu( @@ -179,7 +179,7 @@ void test_vluxseg8ei32_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_tu( @@ -204,7 +204,7 @@ void test_vluxseg8ei32_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_tu( @@ -229,7 +229,7 @@ void 
test_vluxseg8ei32_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_tu( @@ -254,7 +254,7 @@ void test_vluxseg8ei32_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_tu( @@ -279,7 +279,7 @@ void test_vluxseg8ei32_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // 
void test_vluxseg8ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_tu( @@ -304,7 +304,7 @@ void test_vluxseg8ei32_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_tu( @@ -329,7 +329,7 @@ void test_vluxseg8ei32_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16m1_tu(vint16m1_t *v0, 
vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_tu( @@ -354,7 +354,7 @@ void test_vluxseg8ei32_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_tu( @@ -379,7 +379,7 @@ void test_vluxseg8ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, 
vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_tu( @@ -404,7 +404,7 @@ void test_vluxseg8ei32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_tu( @@ -429,7 +429,7 @@ void test_vluxseg8ei32_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, 
vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_tu( @@ -454,7 +454,7 @@ void test_vluxseg8ei32_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_tu( @@ -479,7 +479,7 @@ void test_vluxseg8ei32_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, 
vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_tu( @@ -504,7 +504,7 @@ void test_vluxseg8ei32_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_tu( @@ -529,7 +529,7 @@ void test_vluxseg8ei32_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t 
maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_tu( @@ -554,7 +554,7 @@ void test_vluxseg8ei32_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_tu( @@ -579,7 +579,7 @@ void test_vluxseg8ei32_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t 
maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_tu( @@ -604,7 +604,7 @@ void test_vluxseg8ei32_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_tu( @@ -629,7 +629,7 @@ void test_vluxseg8ei32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, 
const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_tu( @@ -654,7 +654,7 @@ void test_vluxseg8ei32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_tum( @@ -679,7 +679,7 @@ void test_vluxseg8ei32_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, 
const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_tum( @@ -704,7 +704,7 @@ void test_vluxseg8ei32_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_tum( @@ -729,7 +729,7 @@ void test_vluxseg8ei32_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, 
vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_tum( @@ -754,7 +754,7 @@ void test_vluxseg8ei32_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_tum( @@ -779,7 +779,7 @@ void test_vluxseg8ei32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t 
maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_tum( @@ -804,7 +804,7 @@ void test_vluxseg8ei32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_tum( @@ -829,7 +829,7 @@ void test_vluxseg8ei32_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t 
maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_tum( @@ -854,7 +854,7 @@ void test_vluxseg8ei32_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_tum( @@ -879,7 +879,7 @@ void test_vluxseg8ei32_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t 
maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_tum( @@ -904,7 +904,7 @@ void test_vluxseg8ei32_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_tum( @@ -929,7 +929,7 @@ void test_vluxseg8ei32_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, 
vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_tum( @@ -954,7 +954,7 @@ void test_vluxseg8ei32_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_tum( @@ -979,7 +979,7 @@ void test_vluxseg8ei32_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t 
maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_tum( @@ -1004,7 +1004,7 @@ void test_vluxseg8ei32_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_tum( @@ -1029,7 +1029,7 @@ void test_vluxseg8ei32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t 
maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_tum( @@ -1054,7 +1054,7 @@ void test_vluxseg8ei32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_tum( @@ -1079,7 +1079,7 @@ void test_vluxseg8ei32_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t 
maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_tum( @@ -1104,7 +1104,7 @@ void test_vluxseg8ei32_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_tum( @@ -1129,7 +1129,7 @@ void test_vluxseg8ei32_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, 
vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_tum( @@ -1154,7 +1154,7 @@ void test_vluxseg8ei32_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_tum( @@ -1179,7 +1179,7 @@ void test_vluxseg8ei32_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t 
maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_tum( @@ -1204,7 +1204,7 @@ void test_vluxseg8ei32_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_tum( @@ -1229,7 +1229,7 @@ void test_vluxseg8ei32_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t 
maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_tum( @@ -1254,7 +1254,7 @@ void test_vluxseg8ei32_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_tum( @@ -1279,7 +1279,7 @@ void test_vluxseg8ei32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t 
*v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_tum( @@ -1304,7 +1304,7 @@ void test_vluxseg8ei32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_tumu( @@ -1329,7 +1329,7 @@ void test_vluxseg8ei32_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, 
vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vluxseg8ei32_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_tumu( @@ -1379,7 +1379,7 @@ void test_vluxseg8ei32_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16m1_tumu(vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_tumu( @@ -1404,7 +1404,7 @@ void test_vluxseg8ei32_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_tumu( @@ -1429,7 +1429,7 @@ void test_vluxseg8ei32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // 
CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_tumu( @@ -1454,7 +1454,7 @@ void test_vluxseg8ei32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_tumu( @@ -1479,7 +1479,7 @@ void 
test_vluxseg8ei32_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_tumu( @@ -1504,7 +1504,7 @@ void test_vluxseg8ei32_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_tumu( @@ -1529,7 +1529,7 @@ void 
test_vluxseg8ei32_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_tumu( @@ -1554,7 +1554,7 @@ void test_vluxseg8ei32_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_tumu( @@ -1579,7 +1579,7 @@ void 
test_vluxseg8ei32_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_tumu( @@ -1604,7 +1604,7 @@ void test_vluxseg8ei32_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_tumu( 
@@ -1629,7 +1629,7 @@ void test_vluxseg8ei32_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_tumu( @@ -1654,7 +1654,7 @@ void test_vluxseg8ei32_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei32_v_i32m1_tumu( @@ -1679,7 +1679,7 @@ void test_vluxseg8ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_tumu( @@ -1704,7 +1704,7 @@ void test_vluxseg8ei32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei32_v_u8mf8_tumu( @@ -1729,7 +1729,7 @@ void test_vluxseg8ei32_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_tumu( @@ -1754,7 +1754,7 @@ void test_vluxseg8ei32_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_tumu( @@ -1779,7 +1779,7 @@ void test_vluxseg8ei32_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_tumu( @@ -1804,7 +1804,7 @@ void test_vluxseg8ei32_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_tumu( @@ -1829,7 +1829,7 @@ void test_vluxseg8ei32_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_tumu( @@ -1854,7 +1854,7 @@ void test_vluxseg8ei32_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_tumu( @@ -1879,7 +1879,7 @@ void test_vluxseg8ei32_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_tumu( @@ -1904,7 +1904,7 @@ void test_vluxseg8ei32_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_tumu( @@ -1929,7 +1929,7 @@ void test_vluxseg8ei32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_tumu( @@ -1954,7 +1954,7 @@ void test_vluxseg8ei32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u64m1_tumu(v0, v1, v2, v3, 
v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_mu( @@ -1979,7 +1979,7 @@ void test_vluxseg8ei32_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_mu( @@ -2004,7 +2004,7 @@ void test_vluxseg8ei32_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, 
base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_mu( @@ -2029,7 +2029,7 @@ void test_vluxseg8ei32_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_mu( @@ -2054,7 +2054,7 @@ void test_vluxseg8ei32_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_mu( @@ -2079,7 +2079,7 @@ void test_vluxseg8ei32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_mu( @@ -2104,7 +2104,7 @@ void test_vluxseg8ei32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_mu( @@ -2129,7 +2129,7 @@ void test_vluxseg8ei32_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_mu( @@ -2154,7 +2154,7 @@ void test_vluxseg8ei32_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_mu( @@ -2179,7 +2179,7 @@ void test_vluxseg8ei32_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_mu( @@ -2204,7 +2204,7 @@ void test_vluxseg8ei32_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_mu( @@ -2229,7 +2229,7 @@ void test_vluxseg8ei32_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_mu( @@ -2254,7 +2254,7 @@ void test_vluxseg8ei32_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_mu( @@ -2279,7 +2279,7 @@ void test_vluxseg8ei32_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_mu( @@ -2304,7 +2304,7 @@ void test_vluxseg8ei32_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_mu( @@ -2329,7 +2329,7 @@ void test_vluxseg8ei32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_mu( @@ -2354,7 +2354,7 @@ void test_vluxseg8ei32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_mu( @@ -2379,7 +2379,7 @@ void test_vluxseg8ei32_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_mu( @@ -2404,7 +2404,7 @@ void test_vluxseg8ei32_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_mu( @@ -2429,7 +2429,7 @@ void test_vluxseg8ei32_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_mu( @@ -2454,7 +2454,7 @@ void test_vluxseg8ei32_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_mu( @@ -2479,7 +2479,7 @@ void test_vluxseg8ei32_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_mu( @@ -2504,7 +2504,7 @@ void test_vluxseg8ei32_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_mu( @@ -2529,7 +2529,7 @@ void test_vluxseg8ei32_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_mu( @@ -2554,7 +2554,7 @@ void test_vluxseg8ei32_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_mu( @@ -2579,7 +2579,7 @@ void test_vluxseg8ei32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_mu( @@ -2604,6 +2604,6 @@ void test_vluxseg8ei32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei32_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei32_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei64.c index 2c61208aabea..ac9850016426 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei64.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_tu( @@ -54,7 +54,7 @@ void test_vluxseg8ei64_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t 
maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_tu( @@ -79,7 +79,7 @@ void test_vluxseg8ei64_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_tu( @@ -104,7 +104,7 @@ void test_vluxseg8ei64_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t 
maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_tu( @@ -129,7 +129,7 @@ void test_vluxseg8ei64_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_tu( @@ -154,7 +154,7 @@ void test_vluxseg8ei64_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, 
vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_tu( @@ -179,7 +179,7 @@ void test_vluxseg8ei64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_tu( @@ -204,7 +204,7 @@ void test_vluxseg8ei64_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, 
vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_tu( @@ -229,7 +229,7 @@ void test_vluxseg8ei64_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_tu( @@ -254,7 +254,7 @@ void test_vluxseg8ei64_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t 
*base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_tu( @@ -279,7 +279,7 @@ void test_vluxseg8ei64_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_tu( @@ -304,7 +304,7 @@ void test_vluxseg8ei64_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return 
vluxseg8ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_tu( @@ -329,7 +329,7 @@ void test_vluxseg8ei64_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_tu( @@ -354,7 +354,7 @@ void test_vluxseg8ei64_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_tu( @@ -379,7 +379,7 @@ void test_vluxseg8ei64_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_tu( @@ -404,7 +404,7 @@ void test_vluxseg8ei64_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, 
base, bindex, vl); + return __riscv_vluxseg8ei64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_tu( @@ -429,7 +429,7 @@ void test_vluxseg8ei64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_tu( @@ -454,7 +454,7 @@ void test_vluxseg8ei64_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vluxseg8ei64_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_tu( @@ -479,7 +479,7 @@ void test_vluxseg8ei64_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_tu( @@ -504,7 +504,7 @@ void test_vluxseg8ei64_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_tu( @@ -529,7 +529,7 @@ void test_vluxseg8ei64_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_tu( @@ -554,7 +554,7 @@ void test_vluxseg8ei64_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_tu( @@ -579,7 +579,7 @@ void test_vluxseg8ei64_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_tu( @@ -604,7 +604,7 @@ void test_vluxseg8ei64_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_tu( @@ -629,7 +629,7 @@ void test_vluxseg8ei64_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_tu( @@ -654,7 +654,7 @@ void test_vluxseg8ei64_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } 
// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_tum( @@ -679,7 +679,7 @@ void test_vluxseg8ei64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_tum( @@ -704,7 +704,7 @@ void test_vluxseg8ei64_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_tum( @@ -729,7 +729,7 @@ void test_vluxseg8ei64_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_tum( @@ -754,7 +754,7 @@ void test_vluxseg8ei64_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f32mf2_tum(v0, v1, v2, v3, 
v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_tum( @@ -779,7 +779,7 @@ void test_vluxseg8ei64_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_tum( @@ -804,7 +804,7 @@ void test_vluxseg8ei64_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vluxseg8ei64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_tum( @@ -829,7 +829,7 @@ void test_vluxseg8ei64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_tum( @@ -854,7 +854,7 @@ void test_vluxseg8ei64_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vluxseg8ei64_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_tum( @@ -879,7 +879,7 @@ void test_vluxseg8ei64_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_tum( @@ -904,7 +904,7 @@ void test_vluxseg8ei64_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8m1_tum(v0, v1, v2, v3, 
v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_tum( @@ -929,7 +929,7 @@ void test_vluxseg8ei64_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_tum( @@ -954,7 +954,7 @@ void test_vluxseg8ei64_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vluxseg8ei64_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_tum( @@ -979,7 +979,7 @@ void test_vluxseg8ei64_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_tum( @@ -1004,7 +1004,7 @@ void test_vluxseg8ei64_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vluxseg8ei64_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_tum( @@ -1029,7 +1029,7 @@ void test_vluxseg8ei64_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_tum( @@ -1054,7 +1054,7 @@ void test_vluxseg8ei64_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vluxseg8ei64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_tum( @@ -1079,7 +1079,7 @@ void test_vluxseg8ei64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_tum( @@ -1104,7 +1104,7 @@ void test_vluxseg8ei64_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, 
vl); + return __riscv_vluxseg8ei64_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_tum( @@ -1129,7 +1129,7 @@ void test_vluxseg8ei64_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_tum( @@ -1154,7 +1154,7 @@ void test_vluxseg8ei64_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + 
return __riscv_vluxseg8ei64_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_tum( @@ -1179,7 +1179,7 @@ void test_vluxseg8ei64_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_tum( @@ -1204,7 +1204,7 @@ void test_vluxseg8ei64_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_tum( @@ -1229,7 +1229,7 @@ void test_vluxseg8ei64_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_tum( @@ -1254,7 +1254,7 @@ void test_vluxseg8ei64_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_tum( @@ -1279,7 +1279,7 @@ void test_vluxseg8ei64_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_tum( @@ -1304,7 +1304,7 @@ void test_vluxseg8ei64_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_tumu( @@ -1329,7 +1329,7 @@ void test_vluxseg8ei64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vluxseg8ei64_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t 
bindex, size_t vl) { - return vluxseg8ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_tumu( @@ -1379,7 +1379,7 @@ void test_vluxseg8ei64_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_tumu( @@ -1404,7 +1404,7 @@ void test_vluxseg8ei64_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t 
maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_tumu( @@ -1429,7 +1429,7 @@ void test_vluxseg8ei64_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloa // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_tumu( @@ -1454,7 +1454,7 @@ void test_vluxseg8ei64_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t 
maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_tumu( @@ -1479,7 +1479,7 @@ void test_vluxseg8ei64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_tumu( @@ -1504,7 +1504,7 @@ void test_vluxseg8ei64_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t 
maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_tumu( @@ -1529,7 +1529,7 @@ void test_vluxseg8ei64_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_tumu( @@ -1554,7 +1554,7 @@ void test_vluxseg8ei64_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t 
maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_tumu( @@ -1579,7 +1579,7 @@ void test_vluxseg8ei64_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_tumu( @@ -1604,7 +1604,7 @@ void test_vluxseg8ei64_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t 
maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_tumu( @@ -1629,7 +1629,7 @@ void test_vluxseg8ei64_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_tumu( @@ -1654,7 +1654,7 @@ void test_vluxseg8ei64_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, 
vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_tumu( @@ -1679,7 +1679,7 @@ void test_vluxseg8ei64_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_tumu( @@ -1704,7 +1704,7 @@ void test_vluxseg8ei64_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, 
vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_tumu( @@ -1729,7 +1729,7 @@ void test_vluxseg8ei64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_tumu( @@ -1754,7 +1754,7 @@ void test_vluxseg8ei64_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, 
vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_tumu( @@ -1779,7 +1779,7 @@ void test_vluxseg8ei64_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_tumu( @@ -1804,7 +1804,7 @@ void test_vluxseg8ei64_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t 
maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_tumu( @@ -1829,7 +1829,7 @@ void test_vluxseg8ei64_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_tumu( @@ -1854,7 +1854,7 @@ void test_vluxseg8ei64_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t 
maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_tumu( @@ -1879,7 +1879,7 @@ void test_vluxseg8ei64_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_tumu( @@ -1904,7 +1904,7 @@ void test_vluxseg8ei64_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, 
vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_tumu( @@ -1929,7 +1929,7 @@ void test_vluxseg8ei64_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_tumu( @@ -1954,7 +1954,7 @@ void test_vluxseg8ei64_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, 
vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_mu( @@ -1979,7 +1979,7 @@ void test_vluxseg8ei64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_mu( @@ -2004,7 +2004,7 @@ void test_vluxseg8ei64_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, 
vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_mu( @@ -2029,7 +2029,7 @@ void test_vluxseg8ei64_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_mu( @@ -2054,7 +2054,7 @@ void test_vluxseg8ei64_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_mu( @@ -2079,7 +2079,7 @@ void test_vluxseg8ei64_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_mu( @@ -2104,7 +2104,7 @@ void test_vluxseg8ei64_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t 
*v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_mu( @@ -2129,7 +2129,7 @@ void test_vluxseg8ei64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_mu( @@ -2154,7 +2154,7 @@ void test_vluxseg8ei64_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t 
*v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_mu( @@ -2179,7 +2179,7 @@ void test_vluxseg8ei64_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_mu( @@ -2204,7 +2204,7 @@ void test_vluxseg8ei64_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // 
CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_mu( @@ -2229,7 +2229,7 @@ void test_vluxseg8ei64_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_mu( @@ -2254,7 +2254,7 @@ void test_vluxseg8ei64_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // 
void test_vluxseg8ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_mu( @@ -2279,7 +2279,7 @@ void test_vluxseg8ei64_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_mu( @@ -2304,7 +2304,7 @@ void test_vluxseg8ei64_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_mu( @@ -2329,7 +2329,7 @@ void test_vluxseg8ei64_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_mu( @@ -2354,7 +2354,7 @@ void test_vluxseg8ei64_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_mu( @@ -2379,7 +2379,7 @@ void test_vluxseg8ei64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_mu( @@ -2404,7 +2404,7 @@ void test_vluxseg8ei64_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_mu( @@ -2429,7 +2429,7 @@ void test_vluxseg8ei64_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_mu( @@ -2454,7 +2454,7 @@ void test_vluxseg8ei64_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_mu( @@ -2479,7 +2479,7 @@ void test_vluxseg8ei64_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_mu( @@ -2504,7 +2504,7 @@ void test_vluxseg8ei64_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // 
void test_vluxseg8ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_mu( @@ -2529,7 +2529,7 @@ void test_vluxseg8ei64_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_mu( @@ -2554,7 +2554,7 @@ void test_vluxseg8ei64_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // 
CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_mu( @@ -2579,7 +2579,7 @@ void test_vluxseg8ei64_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_mu( @@ -2604,6 +2604,6 @@ void test_vluxseg8ei64_v_u32m1_mu(vuint32m1_t *v0, 
vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei8.c index 39532fc8cadd..ee8abbf1c99c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vluxseg8ei8.c @@ -29,7 +29,7 @@ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vluxseg8ei8_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_tu( @@ -54,7 +54,7 @@ void test_vluxseg8ei8_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_tu( @@ -79,7 +79,7 @@ void test_vluxseg8ei8_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vluxseg8ei8_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_tu( @@ -104,7 +104,7 @@ void test_vluxseg8ei8_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_tu( @@ -129,7 +129,7 @@ void test_vluxseg8ei8_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return 
__riscv_vluxseg8ei8_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_tu( @@ -154,7 +154,7 @@ void test_vluxseg8ei8_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_tu( @@ -179,7 +179,7 @@ void test_vluxseg8ei8_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_tu( @@ -204,7 +204,7 @@ void test_vluxseg8ei8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_tu( @@ -229,7 +229,7 @@ void test_vluxseg8ei8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_tu( @@ -254,7 +254,7 @@ void test_vluxseg8ei8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_tu( @@ -279,7 +279,7 @@ void test_vluxseg8ei8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_tu( @@ -304,7 +304,7 @@ void 
test_vluxseg8ei8_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_tu( @@ -329,7 +329,7 @@ void test_vluxseg8ei8_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_tu( @@ -354,7 +354,7 @@ void test_vluxseg8ei8_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // 
CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_tu( @@ -379,7 +379,7 @@ void test_vluxseg8ei8_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_tu( @@ -404,7 +404,7 @@ void test_vluxseg8ei8_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i64m1_tu(vint64m1_t *v0, 
vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_tu( @@ -429,7 +429,7 @@ void test_vluxseg8ei8_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_tu( @@ -454,7 +454,7 @@ void test_vluxseg8ei8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t 
*v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_tu( @@ -479,7 +479,7 @@ void test_vluxseg8ei8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_tu( @@ -504,7 +504,7 @@ void test_vluxseg8ei8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t 
maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_tu( @@ -529,7 +529,7 @@ void test_vluxseg8ei8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_tu( @@ -554,7 +554,7 @@ void test_vluxseg8ei8_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, 
vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_tu( @@ -579,7 +579,7 @@ void test_vluxseg8ei8_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_tu( @@ -604,7 +604,7 @@ void test_vluxseg8ei8_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t 
maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_tu( @@ -629,7 +629,7 @@ void test_vluxseg8ei8_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_tu( @@ -654,7 +654,7 @@ void test_vluxseg8ei8_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t 
maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_tum( @@ -679,7 +679,7 @@ void test_vluxseg8ei8_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_tum( @@ -704,7 +704,7 @@ void test_vluxseg8ei8_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, 
vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_tum( @@ -729,7 +729,7 @@ void test_vluxseg8ei8_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_tum( @@ -754,7 +754,7 @@ void test_vluxseg8ei8_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, 
vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_tum( @@ -779,7 +779,7 @@ void test_vluxseg8ei8_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_tum( @@ -804,7 +804,7 @@ void test_vluxseg8ei8_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t 
maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_tum( @@ -829,7 +829,7 @@ void test_vluxseg8ei8_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_tum( @@ -854,7 +854,7 @@ void test_vluxseg8ei8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, 
vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_tum( @@ -879,7 +879,7 @@ void test_vluxseg8ei8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_tum( @@ -904,7 +904,7 @@ void test_vluxseg8ei8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, 
vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_tum( @@ -929,7 +929,7 @@ void test_vluxseg8ei8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_tum( @@ -954,7 +954,7 @@ void test_vluxseg8ei8_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, 
vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_tum( @@ -979,7 +979,7 @@ void test_vluxseg8ei8_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_tum( @@ -1004,7 +1004,7 @@ void test_vluxseg8ei8_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t 
maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_tum( @@ -1029,7 +1029,7 @@ void test_vluxseg8ei8_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_tum( @@ -1054,7 +1054,7 @@ void test_vluxseg8ei8_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t 
maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_tum( @@ -1079,7 +1079,7 @@ void test_vluxseg8ei8_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_tum( @@ -1104,7 +1104,7 @@ void test_vluxseg8ei8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t 
maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_tum( @@ -1129,7 +1129,7 @@ void test_vluxseg8ei8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_tum( @@ -1154,7 +1154,7 @@ void test_vluxseg8ei8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, 
vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_tum( @@ -1179,7 +1179,7 @@ void test_vluxseg8ei8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_tum( @@ -1204,7 +1204,7 @@ void test_vluxseg8ei8_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t 
maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_tum( @@ -1229,7 +1229,7 @@ void test_vluxseg8ei8_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_tum( @@ -1254,7 +1254,7 @@ void test_vluxseg8ei8_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t 
maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_tum( @@ -1279,7 +1279,7 @@ void test_vluxseg8ei8_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_tum( @@ -1304,7 +1304,7 @@ void test_vluxseg8ei8_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, 
vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_tumu( @@ -1329,7 +1329,7 @@ void test_vluxseg8ei8_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_tumu( @@ -1354,7 +1354,7 @@ void test_vluxseg8ei8_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, 
vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_tumu( @@ -1379,7 +1379,7 @@ void test_vluxseg8ei8_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_tumu( @@ -1404,7 +1404,7 @@ void test_vluxseg8ei8_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, 
vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_tumu( @@ -1429,7 +1429,7 @@ void test_vluxseg8ei8_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_tumu( @@ -1454,7 +1454,7 @@ void test_vluxseg8ei8_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f64m1_tumu(vfloat64m1_t *v0, 
vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_tumu( @@ -1479,7 +1479,7 @@ void test_vluxseg8ei8_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_tumu( @@ -1504,7 +1504,7 @@ void test_vluxseg8ei8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_tumu( @@ -1529,7 +1529,7 @@ void test_vluxseg8ei8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_tumu( @@ -1554,7 +1554,7 @@ void test_vluxseg8ei8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_tumu( @@ -1579,7 +1579,7 @@ void test_vluxseg8ei8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_tumu( @@ -1604,7 +1604,7 @@ void test_vluxseg8ei8_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_ // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_tumu( @@ -1629,7 +1629,7 @@ void test_vluxseg8ei8_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_tumu( @@ -1654,7 +1654,7 @@ void test_vluxseg8ei8_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_tumu( @@ -1679,7 +1679,7 @@ void test_vluxseg8ei8_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_tumu( @@ -1704,7 +1704,7 @@ void test_vluxseg8ei8_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_tumu( @@ -1729,7 +1729,7 @@ void test_vluxseg8ei8_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_tumu( @@ -1754,7 +1754,7 @@ void test_vluxseg8ei8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: ret void // void 
test_vluxseg8ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_tumu( @@ -1779,7 +1779,7 @@ void test_vluxseg8ei8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_tumu( @@ -1804,7 +1804,7 @@ void test_vluxseg8ei8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: ret 
void // void test_vluxseg8ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_tumu( @@ -1829,7 +1829,7 @@ void test_vluxseg8ei8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_tumu( @@ -1854,7 +1854,7 @@ void test_vluxseg8ei8_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16m // 
CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_tumu( @@ -1879,7 +1879,7 @@ void test_vluxseg8ei8_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_tumu( @@ -1904,7 +1904,7 @@ void test_vluxseg8ei8_v_u16m1_tumu(vuint16m1_t *v0, 
vuint16m1_t *v1, vuint16m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_tumu( @@ -1929,7 +1929,7 @@ void test_vluxseg8ei8_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32m // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_tumu( @@ -1954,7 +1954,7 @@ void 
test_vluxseg8ei8_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_mu( @@ -1979,7 +1979,7 @@ void test_vluxseg8ei8_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei8_v_f16mf2_mu( @@ -2004,7 +2004,7 @@ void test_vluxseg8ei8_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_mu( @@ -2029,7 +2029,7 @@ void test_vluxseg8ei8_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_mu( @@ -2054,7 +2054,7 @@ void test_vluxseg8ei8_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_mu( @@ -2079,7 +2079,7 @@ void test_vluxseg8ei8_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_mu( @@ -2104,7 +2104,7 @@ void test_vluxseg8ei8_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_mu( @@ -2129,7 +2129,7 @@ void test_vluxseg8ei8_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_mu( @@ -2154,7 +2154,7 @@ void test_vluxseg8ei8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_mu( @@ -2179,7 +2179,7 @@ void test_vluxseg8ei8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_mu( @@ -2204,7 +2204,7 @@ void test_vluxseg8ei8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_mu( @@ -2229,7 +2229,7 @@ void test_vluxseg8ei8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_mu( @@ -2254,7 +2254,7 @@ void test_vluxseg8ei8_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_mu( @@ -2279,7 +2279,7 @@ void test_vluxseg8ei8_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_mu( @@ -2304,7 +2304,7 @@ void test_vluxseg8ei8_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_mu( @@ -2329,7 +2329,7 @@ void test_vluxseg8ei8_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei8_v_i64m1_mu( @@ -2354,7 +2354,7 @@ void test_vluxseg8ei8_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_mu( @@ -2379,7 +2379,7 @@ void test_vluxseg8ei8_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei8_v_u8mf4_mu( @@ -2404,7 +2404,7 @@ void test_vluxseg8ei8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_mu( @@ -2429,7 +2429,7 @@ void test_vluxseg8ei8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei8_v_u8m1_mu( @@ -2454,7 +2454,7 @@ void test_vluxseg8ei8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_mu( @@ -2479,7 +2479,7 @@ void test_vluxseg8ei8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vluxseg8ei8_v_u16mf2_mu( @@ -2504,7 +2504,7 @@ void test_vluxseg8ei8_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_mu( @@ -2529,7 +2529,7 @@ void test_vluxseg8ei8_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_mu( @@ -2554,7 +2554,7 @@ void test_vluxseg8ei8_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_mu( @@ -2579,7 +2579,7 @@ void test_vluxseg8ei8_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, 
bindex, vl); } // CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_mu( @@ -2604,6 +2604,6 @@ void test_vluxseg8ei8_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: ret void // void test_vluxseg8ei8_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); + return __riscv_vluxseg8ei8_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmacc.c index 18477fcf18c4..fa63896f03d0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmacc.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vmacc_vv_i8mf8_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_tu( @@ -22,7 +22,7 @@ vint8mf8_t test_vmacc_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vmacc_vx_i8mf8_tu(vd, rs1, vs2, vl); + return 
__riscv_vmacc_vx_i8mf8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_tu( @@ -31,7 +31,7 @@ vint8mf8_t test_vmacc_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vmacc_vv_i8mf4_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_tu( @@ -40,7 +40,7 @@ vint8mf4_t test_vmacc_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vmacc_vx_i8mf4_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_tu( @@ -49,7 +49,7 @@ vint8mf4_t test_vmacc_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vmacc_vv_i8mf2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_tu( @@ -58,7 +58,7 @@ vint8mf2_t test_vmacc_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vmacc_vx_i8mf2_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_tu( @@ -67,7 +67,7 @@ vint8mf2_t test_vmacc_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vmacc_vv_i8m1_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_tu( @@ -76,7 +76,7 @@ vint8m1_t 
test_vmacc_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vmacc_vx_i8m1_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_tu( @@ -85,7 +85,7 @@ vint8m1_t test_vmacc_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vmacc_vv_i8m2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_tu( @@ -94,7 +94,7 @@ vint8m2_t test_vmacc_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vmacc_vx_i8m2_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_tu( @@ -103,7 +103,7 @@ vint8m2_t test_vmacc_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vmacc_vv_i8m4_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_tu( @@ -112,7 +112,7 @@ vint8m4_t test_vmacc_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vmacc_vx_i8m4_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_tu( @@ -121,7 +121,7 @@ vint8m4_t test_vmacc_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8_tu(vint8m8_t vd, 
vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vmacc_vv_i8m8_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_tu( @@ -130,7 +130,7 @@ vint8m8_t test_vmacc_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vmacc_vx_i8m8_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_tu( @@ -139,7 +139,7 @@ vint8m8_t test_vmacc_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vmacc_vv_i16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_tu( @@ -148,7 +148,7 @@ vint16mf4_t test_vmacc_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vmacc_vx_i16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_tu( @@ -157,7 +157,7 @@ vint16mf4_t test_vmacc_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vmacc_vv_i16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_tu( @@ -166,7 +166,7 @@ vint16mf2_t test_vmacc_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vmacc_vx_i16mf2_tu(vd, rs1, vs2, vl); + 
return __riscv_vmacc_vx_i16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_tu( @@ -175,7 +175,7 @@ vint16mf2_t test_vmacc_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vmacc_vv_i16m1_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_tu( @@ -184,7 +184,7 @@ vint16m1_t test_vmacc_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vmacc_vx_i16m1_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_tu( @@ -193,7 +193,7 @@ vint16m1_t test_vmacc_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vmacc_vv_i16m2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_tu( @@ -202,7 +202,7 @@ vint16m2_t test_vmacc_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vmacc_vx_i16m2_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_tu( @@ -211,7 +211,7 @@ vint16m2_t test_vmacc_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vmacc_vv_i16m4_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_tu( @@ -220,7 
+220,7 @@ vint16m4_t test_vmacc_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vmacc_vx_i16m4_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_tu( @@ -229,7 +229,7 @@ vint16m4_t test_vmacc_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vmacc_vv_i16m8_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_tu( @@ -238,7 +238,7 @@ vint16m8_t test_vmacc_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vmacc_vx_i16m8_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tu( @@ -247,7 +247,7 @@ vint16m8_t test_vmacc_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vmacc_vv_i32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tu( @@ -256,7 +256,7 @@ vint32mf2_t test_vmacc_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vmacc_vx_i32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_tu( @@ -265,7 +265,7 @@ vint32mf2_t test_vmacc_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vmacc_vv_i32m1_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_tu( @@ -274,7 +274,7 @@ vint32m1_t test_vmacc_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vmacc_vx_i32m1_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_tu( @@ -283,7 +283,7 @@ vint32m1_t test_vmacc_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vmacc_vv_i32m2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_tu( @@ -292,7 +292,7 @@ vint32m2_t test_vmacc_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vmacc_vx_i32m2_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_tu( @@ -301,7 +301,7 @@ vint32m2_t test_vmacc_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vmacc_vv_i32m4_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_tu( @@ -310,7 +310,7 @@ vint32m4_t test_vmacc_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, 
size_t vl) { - return vmacc_vx_i32m4_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_tu( @@ -319,7 +319,7 @@ vint32m4_t test_vmacc_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vmacc_vv_i32m8_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_tu( @@ -328,7 +328,7 @@ vint32m8_t test_vmacc_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vmacc_vx_i32m8_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_tu( @@ -337,7 +337,7 @@ vint32m8_t test_vmacc_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vmacc_vv_i64m1_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_tu( @@ -346,7 +346,7 @@ vint64m1_t test_vmacc_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vmacc_vx_i64m1_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_tu( @@ -355,7 +355,7 @@ vint64m1_t test_vmacc_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vmacc_vv_i64m2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m2_tu(vd, vs1, vs2, 
vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_tu( @@ -364,7 +364,7 @@ vint64m2_t test_vmacc_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vmacc_vx_i64m2_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_tu( @@ -373,7 +373,7 @@ vint64m2_t test_vmacc_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vmacc_vv_i64m4_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_tu( @@ -382,7 +382,7 @@ vint64m4_t test_vmacc_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vmacc_vx_i64m4_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_tu( @@ -391,7 +391,7 @@ vint64m4_t test_vmacc_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vmacc_vv_i64m8_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_tu( @@ -400,7 +400,7 @@ vint64m8_t test_vmacc_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vmacc_vx_i64m8_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_tu( @@ -409,7 +409,7 @@ vint64m8_t 
test_vmacc_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vmacc_vv_u8mf8_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_tu( @@ -418,7 +418,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vmacc_vx_u8mf8_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_tu( @@ -427,7 +427,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vmacc_vv_u8mf4_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_tu( @@ -436,7 +436,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vmacc_vx_u8mf4_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_tu( @@ -445,7 +445,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vmacc_vv_u8mf2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_tu( @@ -454,7 +454,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vmacc_vx_u8mf2_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_tu( @@ -463,7 +463,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vmacc_vv_u8m1_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_tu( @@ -472,7 +472,7 @@ vuint8m1_t test_vmacc_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vmacc_vx_u8m1_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_tu( @@ -481,7 +481,7 @@ vuint8m1_t test_vmacc_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vmacc_vv_u8m2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_tu( @@ -490,7 +490,7 @@ vuint8m2_t test_vmacc_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vmacc_vx_u8m2_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_tu( @@ -499,7 +499,7 @@ vuint8m2_t test_vmacc_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return 
vmacc_vv_u8m4_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_tu( @@ -508,7 +508,7 @@ vuint8m4_t test_vmacc_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vmacc_vx_u8m4_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_tu( @@ -517,7 +517,7 @@ vuint8m4_t test_vmacc_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vmacc_vv_u8m8_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_tu( @@ -526,7 +526,7 @@ vuint8m8_t test_vmacc_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vmacc_vx_u8m8_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_tu( @@ -535,7 +535,7 @@ vuint8m8_t test_vmacc_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vmacc_vv_u16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_tu( @@ -544,7 +544,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vmacc_vx_u16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16mf4_tu(vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_tu( @@ -553,7 +553,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vmacc_vv_u16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_tu( @@ -562,7 +562,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vmacc_vx_u16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_tu( @@ -571,7 +571,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vmacc_vv_u16m1_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_tu( @@ -580,7 +580,7 @@ vuint16m1_t test_vmacc_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vmacc_vx_u16m1_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_tu( @@ -589,7 +589,7 @@ vuint16m1_t test_vmacc_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vmacc_vv_u16m2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_tu( @@ -598,7 +598,7 @@ vuint16m2_t 
test_vmacc_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vmacc_vx_u16m2_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_tu( @@ -607,7 +607,7 @@ vuint16m2_t test_vmacc_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vmacc_vv_u16m4_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_tu( @@ -616,7 +616,7 @@ vuint16m4_t test_vmacc_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vmacc_vx_u16m4_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_tu( @@ -625,7 +625,7 @@ vuint16m4_t test_vmacc_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vmacc_vv_u16m8_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_tu( @@ -634,7 +634,7 @@ vuint16m8_t test_vmacc_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vmacc_vx_u16m8_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tu( @@ -643,7 +643,7 @@ vuint16m8_t test_vmacc_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vmacc_vv_u32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tu( @@ -652,7 +652,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vmacc_vx_u32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_tu( @@ -661,7 +661,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vmacc_vv_u32m1_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_tu( @@ -670,7 +670,7 @@ vuint32m1_t test_vmacc_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vmacc_vx_u32m1_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_tu( @@ -679,7 +679,7 @@ vuint32m1_t test_vmacc_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vmacc_vv_u32m2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_tu( @@ -688,7 +688,7 @@ vuint32m2_t test_vmacc_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, 
vuint32m2_t vs2, size_t vl) { - return vmacc_vx_u32m2_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_tu( @@ -697,7 +697,7 @@ vuint32m2_t test_vmacc_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vmacc_vv_u32m4_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_tu( @@ -706,7 +706,7 @@ vuint32m4_t test_vmacc_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vmacc_vx_u32m4_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_tu( @@ -715,7 +715,7 @@ vuint32m4_t test_vmacc_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vmacc_vv_u32m8_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_tu( @@ -724,7 +724,7 @@ vuint32m8_t test_vmacc_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vmacc_vx_u32m8_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_tu( @@ -733,7 +733,7 @@ vuint32m8_t test_vmacc_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vmacc_vv_u64m1_tu(vd, vs1, vs2, vl); + return 
__riscv_vmacc_vv_u64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_tu( @@ -742,7 +742,7 @@ vuint64m1_t test_vmacc_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vmacc_vx_u64m1_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_tu( @@ -751,7 +751,7 @@ vuint64m1_t test_vmacc_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vmacc_vv_u64m2_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_tu( @@ -760,7 +760,7 @@ vuint64m2_t test_vmacc_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vmacc_vx_u64m2_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_tu( @@ -769,7 +769,7 @@ vuint64m2_t test_vmacc_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vmacc_vv_u64m4_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_tu( @@ -778,7 +778,7 @@ vuint64m4_t test_vmacc_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vmacc_vx_u64m4_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_tu( @@ 
-787,7 +787,7 @@ vuint64m4_t test_vmacc_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vmacc_vv_u64m8_tu(vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_tu( @@ -796,7 +796,7 @@ vuint64m8_t test_vmacc_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vmacc_vx_u64m8_tu(vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_tum( @@ -805,7 +805,7 @@ vuint64m8_t test_vmacc_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vmacc_vv_i8mf8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_tum( @@ -814,7 +814,7 @@ vint8mf8_t test_vmacc_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vmacc_vx_i8mf8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_tum( @@ -823,7 +823,7 @@ vint8mf8_t test_vmacc_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vmacc_vv_i8mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_tum( @@ -832,7 
+832,7 @@ vint8mf4_t test_vmacc_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vmacc_vx_i8mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_tum( @@ -841,7 +841,7 @@ vint8mf4_t test_vmacc_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vmacc_vv_i8mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_tum( @@ -850,7 +850,7 @@ vint8mf2_t test_vmacc_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vmacc_vx_i8mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_tum( @@ -859,7 +859,7 @@ vint8mf2_t test_vmacc_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vmacc_vv_i8m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_tum( @@ -868,7 +868,7 @@ vint8m1_t test_vmacc_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vmacc_vx_i8m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vmacc_vv_i8m2_tum( @@ -877,7 +877,7 @@ vint8m1_t test_vmacc_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vmacc_vv_i8m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_tum( @@ -886,7 +886,7 @@ vint8m2_t test_vmacc_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vmacc_vx_i8m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_tum( @@ -895,7 +895,7 @@ vint8m2_t test_vmacc_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vmacc_vv_i8m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_tum( @@ -904,7 +904,7 @@ vint8m4_t test_vmacc_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vmacc_vx_i8m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_tum( @@ -913,7 +913,7 @@ vint8m4_t test_vmacc_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vmacc_vv_i8m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m8_tum(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_tum( @@ -922,7 +922,7 @@ vint8m8_t test_vmacc_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vmacc_vx_i8m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_tum( @@ -931,7 +931,7 @@ vint8m8_t test_vmacc_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vmacc_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_tum( @@ -940,7 +940,7 @@ vint16mf4_t test_vmacc_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vmacc_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_tum( @@ -949,7 +949,7 @@ vint16mf4_t test_vmacc_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vmacc_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_tum( @@ -958,7 +958,7 @@ vint16mf2_t test_vmacc_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vmacc_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); 
+ return __riscv_vmacc_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_tum( @@ -967,7 +967,7 @@ vint16mf2_t test_vmacc_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vmacc_vv_i16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_tum( @@ -976,7 +976,7 @@ vint16m1_t test_vmacc_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vmacc_vx_i16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_tum( @@ -985,7 +985,7 @@ vint16m1_t test_vmacc_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vmacc_vv_i16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_tum( @@ -994,7 +994,7 @@ vint16m2_t test_vmacc_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vmacc_vx_i16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_tum( @@ -1003,7 +1003,7 @@ vint16m2_t test_vmacc_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - 
return vmacc_vv_i16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_tum( @@ -1012,7 +1012,7 @@ vint16m4_t test_vmacc_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vmacc_vx_i16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_tum( @@ -1021,7 +1021,7 @@ vint16m4_t test_vmacc_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vmacc_vv_i16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_tum( @@ -1030,7 +1030,7 @@ vint16m8_t test_vmacc_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vmacc_vx_i16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tum( @@ -1039,7 +1039,7 @@ vint16m8_t test_vmacc_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vmacc_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tum( @@ -1048,7 +1048,7 @@ vint32mf2_t test_vmacc_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_tum(vbool64_t 
mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vmacc_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_tum( @@ -1057,7 +1057,7 @@ vint32mf2_t test_vmacc_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vmacc_vv_i32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_tum( @@ -1066,7 +1066,7 @@ vint32m1_t test_vmacc_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vmacc_vx_i32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_tum( @@ -1075,7 +1075,7 @@ vint32m1_t test_vmacc_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vmacc_vv_i32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_tum( @@ -1084,7 +1084,7 @@ vint32m2_t test_vmacc_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vmacc_vx_i32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_tum( @@ -1093,7 +1093,7 @@ vint32m2_t test_vmacc_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, v // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vmacc_vv_i32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_tum( @@ -1102,7 +1102,7 @@ vint32m4_t test_vmacc_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vmacc_vx_i32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_tum( @@ -1111,7 +1111,7 @@ vint32m4_t test_vmacc_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vmacc_vv_i32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_tum( @@ -1120,7 +1120,7 @@ vint32m8_t test_vmacc_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vmacc_vx_i32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_tum( @@ -1129,7 +1129,7 @@ vint32m8_t test_vmacc_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vmacc_vv_i64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_tum( @@ -1138,7 +1138,7 @@ vint64m1_t test_vmacc_vv_i64m1_tum(vbool64_t 
mask, vint64m1_t vd, vint64m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vmacc_vx_i64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_tum( @@ -1147,7 +1147,7 @@ vint64m1_t test_vmacc_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vmacc_vv_i64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_tum( @@ -1156,7 +1156,7 @@ vint64m2_t test_vmacc_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vmacc_vx_i64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_tum( @@ -1165,7 +1165,7 @@ vint64m2_t test_vmacc_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vmacc_vv_i64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_tum( @@ -1174,7 +1174,7 @@ vint64m4_t test_vmacc_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vmacc_vx_i64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_tum( @@ -1183,7 
+1183,7 @@ vint64m4_t test_vmacc_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vmacc_vv_i64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_tum( @@ -1192,7 +1192,7 @@ vint64m8_t test_vmacc_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vmacc_vx_i64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_tum( @@ -1201,7 +1201,7 @@ vint64m8_t test_vmacc_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vmacc_vv_u8mf8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_tum( @@ -1210,7 +1210,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vmacc_vx_u8mf8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_tum( @@ -1219,7 +1219,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vmacc_vv_u8mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf4_tum(mask, vd, vs1, 
vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_tum( @@ -1228,7 +1228,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vmacc_vx_u8mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_tum( @@ -1237,7 +1237,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vmacc_vv_u8mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_tum( @@ -1246,7 +1246,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vmacc_vx_u8mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_tum( @@ -1255,7 +1255,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vmacc_vv_u8m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_tum( @@ -1264,7 +1264,7 @@ vuint8m1_t test_vmacc_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vmacc_vx_u8m1_tum(mask, vd, rs1, vs2, 
vl); + return __riscv_vmacc_vx_u8m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_tum( @@ -1273,7 +1273,7 @@ vuint8m1_t test_vmacc_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vmacc_vv_u8m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_tum( @@ -1282,7 +1282,7 @@ vuint8m2_t test_vmacc_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vmacc_vx_u8m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_tum( @@ -1291,7 +1291,7 @@ vuint8m2_t test_vmacc_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vmacc_vv_u8m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_tum( @@ -1300,7 +1300,7 @@ vuint8m4_t test_vmacc_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vmacc_vx_u8m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_tum( @@ -1309,7 +1309,7 @@ vuint8m4_t test_vmacc_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return 
vmacc_vv_u8m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_tum( @@ -1318,7 +1318,7 @@ vuint8m8_t test_vmacc_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vmacc_vx_u8m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_tum( @@ -1327,7 +1327,7 @@ vuint8m8_t test_vmacc_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vmacc_vv_u16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_tum( @@ -1336,7 +1336,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vmacc_vx_u16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_tum( @@ -1345,7 +1345,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vmacc_vv_u16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_tum( @@ -1354,7 +1354,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vmacc_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vmacc_vx_u16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_tum( @@ -1363,7 +1363,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vmacc_vv_u16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_tum( @@ -1372,7 +1372,7 @@ vuint16m1_t test_vmacc_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vmacc_vx_u16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_tum( @@ -1381,7 +1381,7 @@ vuint16m1_t test_vmacc_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vmacc_vv_u16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_tum( @@ -1390,7 +1390,7 @@ vuint16m2_t test_vmacc_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vmacc_vx_u16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_tum( @@ -1399,7 +1399,7 @@ vuint16m2_t test_vmacc_vx_u16m2_tum(vbool8_t mask, 
vuint16m2_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vmacc_vv_u16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_tum( @@ -1408,7 +1408,7 @@ vuint16m4_t test_vmacc_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vmacc_vx_u16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_tum( @@ -1417,7 +1417,7 @@ vuint16m4_t test_vmacc_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vmacc_vv_u16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_tum( @@ -1426,7 +1426,7 @@ vuint16m8_t test_vmacc_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vmacc_vx_u16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tum( @@ -1435,7 +1435,7 @@ vuint16m8_t test_vmacc_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vmacc_vv_u32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vmacc_vx_u32mf2_tum( @@ -1444,7 +1444,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vmacc_vx_u32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_tum( @@ -1453,7 +1453,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vmacc_vv_u32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_tum( @@ -1462,7 +1462,7 @@ vuint32m1_t test_vmacc_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vmacc_vx_u32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_tum( @@ -1471,7 +1471,7 @@ vuint32m1_t test_vmacc_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vmacc_vv_u32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_tum( @@ -1480,7 +1480,7 @@ vuint32m2_t test_vmacc_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vmacc_vx_u32m2_tum(mask, vd, rs1, vs2, vl); + 
return __riscv_vmacc_vx_u32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_tum( @@ -1489,7 +1489,7 @@ vuint32m2_t test_vmacc_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vmacc_vv_u32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_tum( @@ -1498,7 +1498,7 @@ vuint32m4_t test_vmacc_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vmacc_vx_u32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_tum( @@ -1507,7 +1507,7 @@ vuint32m4_t test_vmacc_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vmacc_vv_u32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_tum( @@ -1516,7 +1516,7 @@ vuint32m8_t test_vmacc_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vmacc_vx_u32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_tum( @@ -1525,7 +1525,7 @@ vuint32m8_t test_vmacc_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, 
vuint64m1_t vs2, size_t vl) { - return vmacc_vv_u64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_tum( @@ -1534,7 +1534,7 @@ vuint64m1_t test_vmacc_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vmacc_vx_u64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_tum( @@ -1543,7 +1543,7 @@ vuint64m1_t test_vmacc_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vmacc_vv_u64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_tum( @@ -1552,7 +1552,7 @@ vuint64m2_t test_vmacc_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vmacc_vx_u64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_tum( @@ -1561,7 +1561,7 @@ vuint64m2_t test_vmacc_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vmacc_vv_u64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_tum( @@ -1570,7 +1570,7 @@ vuint64m4_t test_vmacc_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m4_t test_vmacc_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vmacc_vx_u64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_tum( @@ -1579,7 +1579,7 @@ vuint64m4_t test_vmacc_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vmacc_vv_u64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_tum( @@ -1588,7 +1588,7 @@ vuint64m8_t test_vmacc_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vmacc_vx_u64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_tumu( @@ -1597,7 +1597,7 @@ vuint64m8_t test_vmacc_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vmacc_vv_i8mf8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_tumu( @@ -1606,7 +1606,7 @@ vint8mf8_t test_vmacc_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vmacc_vx_i8mf8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_tumu( @@ -1615,7 +1615,7 @@ vint8mf8_t 
test_vmacc_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vmacc_vv_i8mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_tumu( @@ -1624,7 +1624,7 @@ vint8mf4_t test_vmacc_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vmacc_vx_i8mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_tumu( @@ -1633,7 +1633,7 @@ vint8mf4_t test_vmacc_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vmacc_vv_i8mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_tumu( @@ -1642,7 +1642,7 @@ vint8mf2_t test_vmacc_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vmacc_vx_i8mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_tumu( @@ -1651,7 +1651,7 @@ vint8mf2_t test_vmacc_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vmacc_vv_i8m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m1_tumu(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_tumu( @@ -1660,7 +1660,7 @@ vint8m1_t test_vmacc_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vmacc_vx_i8m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_tumu( @@ -1669,7 +1669,7 @@ vint8m1_t test_vmacc_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vmacc_vv_i8m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_tumu( @@ -1678,7 +1678,7 @@ vint8m2_t test_vmacc_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vmacc_vx_i8m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_tumu( @@ -1687,7 +1687,7 @@ vint8m2_t test_vmacc_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vmacc_vv_i8m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_tumu( @@ -1696,7 +1696,7 @@ vint8m4_t test_vmacc_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vmacc_vx_i8m4_tumu(mask, vd, rs1, vs2, vl); + return 
__riscv_vmacc_vx_i8m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_tumu( @@ -1705,7 +1705,7 @@ vint8m4_t test_vmacc_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vmacc_vv_i8m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_tumu( @@ -1714,7 +1714,7 @@ vint8m8_t test_vmacc_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vmacc_vx_i8m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_tumu( @@ -1723,7 +1723,7 @@ vint8m8_t test_vmacc_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vmacc_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_tumu( @@ -1732,7 +1732,7 @@ vint16mf4_t test_vmacc_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vmacc_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_tumu( @@ -1741,7 +1741,7 @@ vint16mf4_t test_vmacc_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t 
vs2, size_t vl) { - return vmacc_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_tumu( @@ -1750,7 +1750,7 @@ vint16mf2_t test_vmacc_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vmacc_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_tumu( @@ -1759,7 +1759,7 @@ vint16mf2_t test_vmacc_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vmacc_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_tumu( @@ -1768,7 +1768,7 @@ vint16m1_t test_vmacc_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vmacc_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_tumu( @@ -1777,7 +1777,7 @@ vint16m1_t test_vmacc_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vmacc_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_tumu( @@ -1786,7 +1786,7 @@ vint16m2_t test_vmacc_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m2_t test_vmacc_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vmacc_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_tumu( @@ -1795,7 +1795,7 @@ vint16m2_t test_vmacc_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vmacc_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_tumu( @@ -1804,7 +1804,7 @@ vint16m4_t test_vmacc_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vmacc_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_tumu( @@ -1813,7 +1813,7 @@ vint16m4_t test_vmacc_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vmacc_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_tumu( @@ -1822,7 +1822,7 @@ vint16m8_t test_vmacc_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vmacc_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_tumu( @@ -1831,7 +1831,7 @@ vint16m8_t test_vmacc_vx_i16m8_tumu(vbool2_t 
mask, vint16m8_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vmacc_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_tumu( @@ -1840,7 +1840,7 @@ vint32mf2_t test_vmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vmacc_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_tumu( @@ -1849,7 +1849,7 @@ vint32mf2_t test_vmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vmacc_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_tumu( @@ -1858,7 +1858,7 @@ vint32m1_t test_vmacc_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vmacc_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_tumu( @@ -1867,7 +1867,7 @@ vint32m1_t test_vmacc_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vmacc_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vmacc_vx_i32m2_tumu( @@ -1876,7 +1876,7 @@ vint32m2_t test_vmacc_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vmacc_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_tumu( @@ -1885,7 +1885,7 @@ vint32m2_t test_vmacc_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vmacc_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_tumu( @@ -1894,7 +1894,7 @@ vint32m4_t test_vmacc_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vmacc_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_tumu( @@ -1903,7 +1903,7 @@ vint32m4_t test_vmacc_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vmacc_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_tumu( @@ -1912,7 +1912,7 @@ vint32m8_t test_vmacc_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vmacc_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); + return 
__riscv_vmacc_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_tumu( @@ -1921,7 +1921,7 @@ vint32m8_t test_vmacc_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vmacc_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_tumu( @@ -1930,7 +1930,7 @@ vint64m1_t test_vmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vmacc_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_tumu( @@ -1939,7 +1939,7 @@ vint64m1_t test_vmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vmacc_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_tumu( @@ -1948,7 +1948,7 @@ vint64m2_t test_vmacc_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vmacc_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_tumu( @@ -1957,7 +1957,7 @@ vint64m2_t test_vmacc_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t 
vs2, size_t vl) { - return vmacc_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_tumu( @@ -1966,7 +1966,7 @@ vint64m4_t test_vmacc_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vmacc_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_tumu( @@ -1975,7 +1975,7 @@ vint64m4_t test_vmacc_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vmacc_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_tumu( @@ -1984,7 +1984,7 @@ vint64m8_t test_vmacc_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vmacc_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_tumu( @@ -1993,7 +1993,7 @@ vint64m8_t test_vmacc_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vmacc_vv_u8mf8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_tumu( @@ -2002,7 +2002,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vmacc_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vmacc_vx_u8mf8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_tumu( @@ -2011,7 +2011,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vmacc_vv_u8mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_tumu( @@ -2020,7 +2020,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vmacc_vx_u8mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_tumu( @@ -2029,7 +2029,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vmacc_vv_u8mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_tumu( @@ -2038,7 +2038,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vmacc_vx_u8mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_tumu( @@ -2047,7 +2047,7 @@ vuint8mf2_t 
test_vmacc_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vmacc_vv_u8m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_tumu( @@ -2056,7 +2056,7 @@ vuint8m1_t test_vmacc_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vmacc_vx_u8m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_tumu( @@ -2065,7 +2065,7 @@ vuint8m1_t test_vmacc_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vmacc_vv_u8m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_tumu( @@ -2074,7 +2074,7 @@ vuint8m2_t test_vmacc_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vmacc_vx_u8m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_tumu( @@ -2083,7 +2083,7 @@ vuint8m2_t test_vmacc_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vmacc_vv_u8m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vmacc_vx_u8m4_tumu( @@ -2092,7 +2092,7 @@ vuint8m4_t test_vmacc_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vmacc_vx_u8m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_tumu( @@ -2101,7 +2101,7 @@ vuint8m4_t test_vmacc_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vmacc_vv_u8m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_tumu( @@ -2110,7 +2110,7 @@ vuint8m8_t test_vmacc_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vmacc_vx_u8m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_tumu( @@ -2119,7 +2119,7 @@ vuint8m8_t test_vmacc_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vmacc_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_tumu( @@ -2128,7 +2128,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vmacc_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl); + 
return __riscv_vmacc_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_tumu( @@ -2137,7 +2137,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vmacc_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_tumu( @@ -2146,7 +2146,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vmacc_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_tumu( @@ -2155,7 +2155,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vmacc_vv_u16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_tumu( @@ -2164,7 +2164,7 @@ vuint16m1_t test_vmacc_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vmacc_vx_u16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_tumu( @@ -2173,7 +2173,7 @@ vuint16m1_t test_vmacc_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_tumu(vbool8_t mask, 
vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vmacc_vv_u16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_tumu( @@ -2182,7 +2182,7 @@ vuint16m2_t test_vmacc_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vmacc_vx_u16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_tumu( @@ -2191,7 +2191,7 @@ vuint16m2_t test_vmacc_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vmacc_vv_u16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_tumu( @@ -2200,7 +2200,7 @@ vuint16m4_t test_vmacc_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vmacc_vx_u16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_tumu( @@ -2209,7 +2209,7 @@ vuint16m4_t test_vmacc_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vmacc_vv_u16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_tumu( @@ -2218,7 +2218,7 @@ vuint16m8_t test_vmacc_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, 
vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vmacc_vx_u16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_tumu( @@ -2227,7 +2227,7 @@ vuint16m8_t test_vmacc_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vmacc_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_tumu( @@ -2236,7 +2236,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vmacc_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_tumu( @@ -2245,7 +2245,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vmacc_vv_u32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_tumu( @@ -2254,7 +2254,7 @@ vuint32m1_t test_vmacc_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vmacc_vx_u32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vmacc_vv_u32m2_tumu( @@ -2263,7 +2263,7 @@ vuint32m1_t test_vmacc_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vmacc_vv_u32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_tumu( @@ -2272,7 +2272,7 @@ vuint32m2_t test_vmacc_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vmacc_vx_u32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_tumu( @@ -2281,7 +2281,7 @@ vuint32m2_t test_vmacc_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vmacc_vv_u32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_tumu( @@ -2290,7 +2290,7 @@ vuint32m4_t test_vmacc_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vmacc_vx_u32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_tumu( @@ -2299,7 +2299,7 @@ vuint32m4_t test_vmacc_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vmacc_vv_u32m8_tumu(mask, vd, vs1, 
vs2, vl); + return __riscv_vmacc_vv_u32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_tumu( @@ -2308,7 +2308,7 @@ vuint32m8_t test_vmacc_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vmacc_vx_u32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_tumu( @@ -2317,7 +2317,7 @@ vuint32m8_t test_vmacc_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vmacc_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_tumu( @@ -2326,7 +2326,7 @@ vuint64m1_t test_vmacc_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vmacc_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_tumu( @@ -2335,7 +2335,7 @@ vuint64m1_t test_vmacc_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vmacc_vv_u64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_tumu( @@ -2344,7 +2344,7 @@ vuint64m2_t test_vmacc_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_tumu(vbool32_t mask, 
vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vmacc_vx_u64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_tumu( @@ -2353,7 +2353,7 @@ vuint64m2_t test_vmacc_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vmacc_vv_u64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_tumu( @@ -2362,7 +2362,7 @@ vuint64m4_t test_vmacc_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vmacc_vx_u64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_tumu( @@ -2371,7 +2371,7 @@ vuint64m4_t test_vmacc_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vmacc_vv_u64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_tumu( @@ -2380,7 +2380,7 @@ vuint64m8_t test_vmacc_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vmacc_vx_u64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_mu( @@ -2389,7 +2389,7 @@ vuint64m8_t test_vmacc_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t 
rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vmacc_vv_i8mf8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_mu( @@ -2398,7 +2398,7 @@ vint8mf8_t test_vmacc_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vmacc_vx_i8mf8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_mu( @@ -2407,7 +2407,7 @@ vint8mf8_t test_vmacc_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vmacc_vv_i8mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_mu( @@ -2416,7 +2416,7 @@ vint8mf4_t test_vmacc_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vmacc_vx_i8mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_mu( @@ -2425,7 +2425,7 @@ vint8mf4_t test_vmacc_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vmacc_vv_i8mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_mu( @@ -2434,7 +2434,7 @@ vint8mf2_t 
test_vmacc_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vmacc_vx_i8mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_mu( @@ -2443,7 +2443,7 @@ vint8mf2_t test_vmacc_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vmacc_vv_i8m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_mu( @@ -2452,7 +2452,7 @@ vint8m1_t test_vmacc_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vmacc_vx_i8m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_mu( @@ -2461,7 +2461,7 @@ vint8m1_t test_vmacc_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vmacc_vv_i8m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_mu( @@ -2470,7 +2470,7 @@ vint8m2_t test_vmacc_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vmacc_vx_i8m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_mu( @@ -2479,7 +2479,7 @@ vint8m2_t 
test_vmacc_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vmacc_vv_i8m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_mu( @@ -2488,7 +2488,7 @@ vint8m4_t test_vmacc_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vmacc_vx_i8m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_mu( @@ -2497,7 +2497,7 @@ vint8m4_t test_vmacc_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vmacc_vv_i8m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i8m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_mu( @@ -2506,7 +2506,7 @@ vint8m8_t test_vmacc_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vmacc_vx_i8m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i8m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_mu( @@ -2515,7 +2515,7 @@ vint8m8_t test_vmacc_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vmacc_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_mu( @@ -2524,7 +2524,7 
@@ vint16mf4_t test_vmacc_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vmacc_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_mu( @@ -2533,7 +2533,7 @@ vint16mf4_t test_vmacc_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vmacc_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_mu( @@ -2542,7 +2542,7 @@ vint16mf2_t test_vmacc_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vmacc_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_mu( @@ -2551,7 +2551,7 @@ vint16mf2_t test_vmacc_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vmacc_vv_i16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_mu( @@ -2560,7 +2560,7 @@ vint16m1_t test_vmacc_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vmacc_vx_i16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m1_mu(mask, vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_mu( @@ -2569,7 +2569,7 @@ vint16m1_t test_vmacc_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vmacc_vv_i16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_mu( @@ -2578,7 +2578,7 @@ vint16m2_t test_vmacc_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vmacc_vx_i16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_mu( @@ -2587,7 +2587,7 @@ vint16m2_t test_vmacc_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vmacc_vv_i16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_mu( @@ -2596,7 +2596,7 @@ vint16m4_t test_vmacc_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vmacc_vx_i16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_mu( @@ -2605,7 +2605,7 @@ vint16m4_t test_vmacc_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vmacc_vv_i16m8_mu(mask, vd, vs1, vs2, vl); + return 
__riscv_vmacc_vv_i16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_mu( @@ -2614,7 +2614,7 @@ vint16m8_t test_vmacc_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vmacc_vx_i16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_mu( @@ -2623,7 +2623,7 @@ vint16m8_t test_vmacc_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vmacc_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_mu( @@ -2632,7 +2632,7 @@ vint32mf2_t test_vmacc_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vmacc_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_mu( @@ -2641,7 +2641,7 @@ vint32mf2_t test_vmacc_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vmacc_vv_i32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_mu( @@ -2650,7 +2650,7 @@ vint32m1_t test_vmacc_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return 
vmacc_vx_i32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_mu( @@ -2659,7 +2659,7 @@ vint32m1_t test_vmacc_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vmacc_vv_i32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_mu( @@ -2668,7 +2668,7 @@ vint32m2_t test_vmacc_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vmacc_vx_i32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_mu( @@ -2677,7 +2677,7 @@ vint32m2_t test_vmacc_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vmacc_vv_i32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_mu( @@ -2686,7 +2686,7 @@ vint32m4_t test_vmacc_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vmacc_vx_i32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_mu( @@ -2695,7 +2695,7 @@ vint32m4_t test_vmacc_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, 
vint32m8_t vs2, size_t vl) { - return vmacc_vv_i32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_mu( @@ -2704,7 +2704,7 @@ vint32m8_t test_vmacc_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vmacc_vx_i32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_mu( @@ -2713,7 +2713,7 @@ vint32m8_t test_vmacc_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vmacc_vv_i64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_mu( @@ -2722,7 +2722,7 @@ vint64m1_t test_vmacc_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vmacc_vx_i64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_mu( @@ -2731,7 +2731,7 @@ vint64m1_t test_vmacc_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vmacc_vv_i64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_mu( @@ -2740,7 +2740,7 @@ vint64m2_t test_vmacc_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_mu(vbool32_t 
mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vmacc_vx_i64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_mu( @@ -2749,7 +2749,7 @@ vint64m2_t test_vmacc_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vmacc_vv_i64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_mu( @@ -2758,7 +2758,7 @@ vint64m4_t test_vmacc_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vmacc_vx_i64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_mu( @@ -2767,7 +2767,7 @@ vint64m4_t test_vmacc_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vmacc_vv_i64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_i64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_mu( @@ -2776,7 +2776,7 @@ vint64m8_t test_vmacc_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vmacc_vx_i64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_i64m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_mu( @@ -2785,7 +2785,7 @@ vint64m8_t test_vmacc_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8mf8_t test_vmacc_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vmacc_vv_u8mf8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_mu( @@ -2794,7 +2794,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vmacc_vx_u8mf8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_mu( @@ -2803,7 +2803,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vmacc_vv_u8mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_mu( @@ -2812,7 +2812,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vmacc_vx_u8mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_mu( @@ -2821,7 +2821,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vmacc_vv_u8mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_mu( @@ -2830,7 +2830,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2_mu(vbool16_t mask, 
vuint8mf2_t vd, vuint8mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vmacc_vx_u8mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_mu( @@ -2839,7 +2839,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vmacc_vv_u8m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_mu( @@ -2848,7 +2848,7 @@ vuint8m1_t test_vmacc_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vmacc_vx_u8m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_mu( @@ -2857,7 +2857,7 @@ vuint8m1_t test_vmacc_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vmacc_vv_u8m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_mu( @@ -2866,7 +2866,7 @@ vuint8m2_t test_vmacc_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vmacc_vx_u8m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_mu( @@ -2875,7 +2875,7 @@ vuint8m2_t 
test_vmacc_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vmacc_vv_u8m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_mu( @@ -2884,7 +2884,7 @@ vuint8m4_t test_vmacc_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vmacc_vx_u8m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_mu( @@ -2893,7 +2893,7 @@ vuint8m4_t test_vmacc_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vmacc_vv_u8m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u8m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_mu( @@ -2902,7 +2902,7 @@ vuint8m8_t test_vmacc_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vmacc_vx_u8m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u8m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_mu( @@ -2911,7 +2911,7 @@ vuint8m8_t test_vmacc_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vmacc_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_mu( 
@@ -2920,7 +2920,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vmacc_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_mu( @@ -2929,7 +2929,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vmacc_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_mu( @@ -2938,7 +2938,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vmacc_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_mu( @@ -2947,7 +2947,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vmacc_vv_u16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_mu( @@ -2956,7 +2956,7 @@ vuint16m1_t test_vmacc_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vmacc_vx_u16m1_mu(mask, vd, rs1, vs2, vl); + return 
__riscv_vmacc_vx_u16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_mu( @@ -2965,7 +2965,7 @@ vuint16m1_t test_vmacc_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vmacc_vv_u16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_mu( @@ -2974,7 +2974,7 @@ vuint16m2_t test_vmacc_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vmacc_vx_u16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_mu( @@ -2983,7 +2983,7 @@ vuint16m2_t test_vmacc_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vmacc_vv_u16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_mu( @@ -2992,7 +2992,7 @@ vuint16m4_t test_vmacc_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vmacc_vx_u16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_mu( @@ -3001,7 +3001,7 @@ vuint16m4_t test_vmacc_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return 
vmacc_vv_u16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_mu( @@ -3010,7 +3010,7 @@ vuint16m8_t test_vmacc_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vmacc_vx_u16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_mu( @@ -3019,7 +3019,7 @@ vuint16m8_t test_vmacc_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vmacc_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_mu( @@ -3028,7 +3028,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vmacc_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_mu( @@ -3037,7 +3037,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vmacc_vv_u32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_mu( @@ -3046,7 +3046,7 @@ vuint32m1_t test_vmacc_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_mu(vbool32_t mask, 
vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vmacc_vx_u32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_mu( @@ -3055,7 +3055,7 @@ vuint32m1_t test_vmacc_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vmacc_vv_u32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_mu( @@ -3064,7 +3064,7 @@ vuint32m2_t test_vmacc_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vmacc_vx_u32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_mu( @@ -3073,7 +3073,7 @@ vuint32m2_t test_vmacc_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vmacc_vv_u32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_mu( @@ -3082,7 +3082,7 @@ vuint32m4_t test_vmacc_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vmacc_vx_u32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_mu( @@ -3091,7 +3091,7 @@ vuint32m4_t test_vmacc_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint32m8_t test_vmacc_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vmacc_vv_u32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_mu( @@ -3100,7 +3100,7 @@ vuint32m8_t test_vmacc_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vmacc_vx_u32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_mu( @@ -3109,7 +3109,7 @@ vuint32m8_t test_vmacc_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vmacc_vv_u64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_mu( @@ -3118,7 +3118,7 @@ vuint64m1_t test_vmacc_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vmacc_vx_u64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_mu( @@ -3127,7 +3127,7 @@ vuint64m1_t test_vmacc_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vmacc_vv_u64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_mu( @@ -3136,7 +3136,7 @@ vuint64m2_t test_vmacc_vv_u64m2_mu(vbool32_t mask, 
vuint64m2_t vd, vuint64m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vmacc_vx_u64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_mu( @@ -3145,7 +3145,7 @@ vuint64m2_t test_vmacc_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vmacc_vv_u64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_mu( @@ -3154,7 +3154,7 @@ vuint64m4_t test_vmacc_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vmacc_vx_u64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_mu( @@ -3163,7 +3163,7 @@ vuint64m4_t test_vmacc_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vmacc_vv_u64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmacc_vv_u64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_mu( @@ -3172,6 +3172,6 @@ vuint64m8_t test_vmacc_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vmacc_vx_u64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmacc_vx_u64m8_mu(mask, vd, rs1, vs2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmadd.c index eee4c23b211d..696a8d54ff89 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmadd.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vmadd_vv_i8mf8_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_tu( @@ -22,7 +22,7 @@ vint8mf8_t test_vmadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vmadd_vx_i8mf8_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_tu( @@ -31,7 +31,7 @@ vint8mf8_t test_vmadd_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vmadd_vv_i8mf4_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_tu( @@ -40,7 +40,7 @@ vint8mf4_t test_vmadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vmadd_vx_i8mf4_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_tu( @@ -49,7 +49,7 @@ vint8mf4_t test_vmadd_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_tu(vint8mf2_t vd, 
vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vmadd_vv_i8mf2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_tu( @@ -58,7 +58,7 @@ vint8mf2_t test_vmadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vmadd_vx_i8mf2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_tu( @@ -67,7 +67,7 @@ vint8mf2_t test_vmadd_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vmadd_vv_i8m1_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_tu( @@ -76,7 +76,7 @@ vint8m1_t test_vmadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vmadd_vx_i8m1_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_tu( @@ -85,7 +85,7 @@ vint8m1_t test_vmadd_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vmadd_vv_i8m2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_tu( @@ -94,7 +94,7 @@ vint8m2_t test_vmadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vmadd_vx_i8m2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m2_tu(vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_tu( @@ -103,7 +103,7 @@ vint8m2_t test_vmadd_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vmadd_vv_i8m4_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_tu( @@ -112,7 +112,7 @@ vint8m4_t test_vmadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vmadd_vx_i8m4_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_tu( @@ -121,7 +121,7 @@ vint8m4_t test_vmadd_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vmadd_vv_i8m8_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_tu( @@ -130,7 +130,7 @@ vint8m8_t test_vmadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vmadd_vx_i8m8_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_tu( @@ -139,7 +139,7 @@ vint8m8_t test_vmadd_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vmadd_vv_i16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_tu( @@ -148,7 +148,7 @@ vint16mf4_t test_vmadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, 
vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vmadd_vx_i16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_tu( @@ -157,7 +157,7 @@ vint16mf4_t test_vmadd_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vmadd_vv_i16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_tu( @@ -166,7 +166,7 @@ vint16mf2_t test_vmadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vmadd_vx_i16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_tu( @@ -175,7 +175,7 @@ vint16mf2_t test_vmadd_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vmadd_vv_i16m1_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_tu( @@ -184,7 +184,7 @@ vint16m1_t test_vmadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vmadd_vx_i16m1_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_tu( @@ -193,7 +193,7 @@ vint16m1_t test_vmadd_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_tu(vint16m2_t 
vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vmadd_vv_i16m2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_tu( @@ -202,7 +202,7 @@ vint16m2_t test_vmadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vmadd_vx_i16m2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_tu( @@ -211,7 +211,7 @@ vint16m2_t test_vmadd_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vmadd_vv_i16m4_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_tu( @@ -220,7 +220,7 @@ vint16m4_t test_vmadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vmadd_vx_i16m4_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_tu( @@ -229,7 +229,7 @@ vint16m4_t test_vmadd_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vmadd_vv_i16m8_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_tu( @@ -238,7 +238,7 @@ vint16m8_t test_vmadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vmadd_vx_i16m8_tu(vd, rs1, vs2, vl); + return 
__riscv_vmadd_vx_i16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tu( @@ -247,7 +247,7 @@ vint16m8_t test_vmadd_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vmadd_vv_i32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tu( @@ -256,7 +256,7 @@ vint32mf2_t test_vmadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vmadd_vx_i32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_tu( @@ -265,7 +265,7 @@ vint32mf2_t test_vmadd_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vmadd_vv_i32m1_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_tu( @@ -274,7 +274,7 @@ vint32m1_t test_vmadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vmadd_vx_i32m1_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_tu( @@ -283,7 +283,7 @@ vint32m1_t test_vmadd_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vmadd_vv_i32m2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_tu( @@ 
-292,7 +292,7 @@ vint32m2_t test_vmadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vmadd_vx_i32m2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_tu( @@ -301,7 +301,7 @@ vint32m2_t test_vmadd_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vmadd_vv_i32m4_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_tu( @@ -310,7 +310,7 @@ vint32m4_t test_vmadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vmadd_vx_i32m4_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_tu( @@ -319,7 +319,7 @@ vint32m4_t test_vmadd_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vmadd_vv_i32m8_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_tu( @@ -328,7 +328,7 @@ vint32m8_t test_vmadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vmadd_vx_i32m8_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_tu( @@ -337,7 +337,7 @@ vint32m8_t test_vmadd_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, si // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vmadd_vv_i64m1_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_tu( @@ -346,7 +346,7 @@ vint64m1_t test_vmadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vmadd_vx_i64m1_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_tu( @@ -355,7 +355,7 @@ vint64m1_t test_vmadd_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vmadd_vv_i64m2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_tu( @@ -364,7 +364,7 @@ vint64m2_t test_vmadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vmadd_vx_i64m2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_tu( @@ -373,7 +373,7 @@ vint64m2_t test_vmadd_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vmadd_vv_i64m4_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_tu( @@ -382,7 +382,7 @@ vint64m4_t test_vmadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, 
size_t vl) { - return vmadd_vx_i64m4_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_tu( @@ -391,7 +391,7 @@ vint64m4_t test_vmadd_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vmadd_vv_i64m8_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_tu( @@ -400,7 +400,7 @@ vint64m8_t test_vmadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vmadd_vx_i64m8_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_tu( @@ -409,7 +409,7 @@ vint64m8_t test_vmadd_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vmadd_vv_u8mf8_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_tu( @@ -418,7 +418,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vmadd_vx_u8mf8_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_tu( @@ -427,7 +427,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vmadd_vv_u8mf4_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf4_tu(vd, 
vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_tu( @@ -436,7 +436,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vmadd_vx_u8mf4_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_tu( @@ -445,7 +445,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vmadd_vv_u8mf2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_tu( @@ -454,7 +454,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vmadd_vx_u8mf2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_tu( @@ -463,7 +463,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vmadd_vv_u8m1_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_tu( @@ -472,7 +472,7 @@ vuint8m1_t test_vmadd_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vmadd_vx_u8m1_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_tu( @@ -481,7 +481,7 @@ vuint8m1_t 
test_vmadd_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vmadd_vv_u8m2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_tu( @@ -490,7 +490,7 @@ vuint8m2_t test_vmadd_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vmadd_vx_u8m2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_tu( @@ -499,7 +499,7 @@ vuint8m2_t test_vmadd_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vmadd_vv_u8m4_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_tu( @@ -508,7 +508,7 @@ vuint8m4_t test_vmadd_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vmadd_vx_u8m4_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_tu( @@ -517,7 +517,7 @@ vuint8m4_t test_vmadd_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vmadd_vv_u8m8_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_tu( @@ -526,7 +526,7 @@ vuint8m8_t test_vmadd_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vmadd_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vmadd_vx_u8m8_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_tu( @@ -535,7 +535,7 @@ vuint8m8_t test_vmadd_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vmadd_vv_u16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_tu( @@ -544,7 +544,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vmadd_vx_u16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_tu( @@ -553,7 +553,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vmadd_vv_u16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_tu( @@ -562,7 +562,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vmadd_vx_u16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_tu( @@ -571,7 +571,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t 
vs2, size_t vl) { - return vmadd_vv_u16m1_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_tu( @@ -580,7 +580,7 @@ vuint16m1_t test_vmadd_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vmadd_vx_u16m1_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_tu( @@ -589,7 +589,7 @@ vuint16m1_t test_vmadd_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vmadd_vv_u16m2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_tu( @@ -598,7 +598,7 @@ vuint16m2_t test_vmadd_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vmadd_vx_u16m2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_tu( @@ -607,7 +607,7 @@ vuint16m2_t test_vmadd_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vmadd_vv_u16m4_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_tu( @@ -616,7 +616,7 @@ vuint16m4_t test_vmadd_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vmadd_vx_u16m4_tu(vd, rs1, vs2, vl); + return 
__riscv_vmadd_vx_u16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_tu( @@ -625,7 +625,7 @@ vuint16m4_t test_vmadd_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vmadd_vv_u16m8_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_tu( @@ -634,7 +634,7 @@ vuint16m8_t test_vmadd_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vmadd_vx_u16m8_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tu( @@ -643,7 +643,7 @@ vuint16m8_t test_vmadd_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vmadd_vv_u32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tu( @@ -652,7 +652,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vmadd_vx_u32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_tu( @@ -661,7 +661,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vmadd_vv_u32m1_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vmadd_vx_u32m1_tu( @@ -670,7 +670,7 @@ vuint32m1_t test_vmadd_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vmadd_vx_u32m1_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_tu( @@ -679,7 +679,7 @@ vuint32m1_t test_vmadd_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vmadd_vv_u32m2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_tu( @@ -688,7 +688,7 @@ vuint32m2_t test_vmadd_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vmadd_vx_u32m2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_tu( @@ -697,7 +697,7 @@ vuint32m2_t test_vmadd_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vmadd_vv_u32m4_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_tu( @@ -706,7 +706,7 @@ vuint32m4_t test_vmadd_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vmadd_vx_u32m4_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_tu( @@ -715,7 +715,7 @@ vuint32m4_t test_vmadd_vx_u32m4_tu(vuint32m4_t vd, 
uint32_t rs1, vuint32m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vmadd_vv_u32m8_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_tu( @@ -724,7 +724,7 @@ vuint32m8_t test_vmadd_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vmadd_vx_u32m8_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_tu( @@ -733,7 +733,7 @@ vuint32m8_t test_vmadd_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vmadd_vv_u64m1_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_tu( @@ -742,7 +742,7 @@ vuint64m1_t test_vmadd_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vmadd_vx_u64m1_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_tu( @@ -751,7 +751,7 @@ vuint64m1_t test_vmadd_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vmadd_vv_u64m2_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_tu( @@ -760,7 +760,7 @@ vuint64m2_t test_vmadd_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vmadd_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vmadd_vx_u64m2_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_tu( @@ -769,7 +769,7 @@ vuint64m2_t test_vmadd_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vmadd_vv_u64m4_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_tu( @@ -778,7 +778,7 @@ vuint64m4_t test_vmadd_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vmadd_vx_u64m4_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_tu( @@ -787,7 +787,7 @@ vuint64m4_t test_vmadd_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vmadd_vv_u64m8_tu(vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_tu( @@ -796,7 +796,7 @@ vuint64m8_t test_vmadd_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vmadd_vx_u64m8_tu(vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_tum( @@ -805,7 +805,7 @@ vuint64m8_t test_vmadd_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t 
vl) { - return vmadd_vv_i8mf8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_tum( @@ -814,7 +814,7 @@ vint8mf8_t test_vmadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vmadd_vx_i8mf8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_tum( @@ -823,7 +823,7 @@ vint8mf8_t test_vmadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vmadd_vv_i8mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_tum( @@ -832,7 +832,7 @@ vint8mf4_t test_vmadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vmadd_vx_i8mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_tum( @@ -841,7 +841,7 @@ vint8mf4_t test_vmadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vmadd_vv_i8mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_tum( @@ -850,7 +850,7 @@ vint8mf2_t test_vmadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_tum(vbool16_t mask, 
vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vmadd_vx_i8mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_tum( @@ -859,7 +859,7 @@ vint8mf2_t test_vmadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vmadd_vv_i8m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_tum( @@ -868,7 +868,7 @@ vint8m1_t test_vmadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vmadd_vx_i8m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_tum( @@ -877,7 +877,7 @@ vint8m1_t test_vmadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vmadd_vv_i8m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_tum( @@ -886,7 +886,7 @@ vint8m2_t test_vmadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vmadd_vx_i8m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_tum( @@ -895,7 +895,7 @@ vint8m2_t test_vmadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_tum(vbool2_t 
mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vmadd_vv_i8m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_tum( @@ -904,7 +904,7 @@ vint8m4_t test_vmadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vmadd_vx_i8m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_tum( @@ -913,7 +913,7 @@ vint8m4_t test_vmadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vmadd_vv_i8m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_tum( @@ -922,7 +922,7 @@ vint8m8_t test_vmadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vmadd_vx_i8m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_tum( @@ -931,7 +931,7 @@ vint8m8_t test_vmadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vmadd_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_tum( @@ -940,7 +940,7 @@ vint16mf4_t test_vmadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vmadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vmadd_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_tum( @@ -949,7 +949,7 @@ vint16mf4_t test_vmadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vmadd_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_tum( @@ -958,7 +958,7 @@ vint16mf2_t test_vmadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vmadd_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_tum( @@ -967,7 +967,7 @@ vint16mf2_t test_vmadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vmadd_vv_i16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_tum( @@ -976,7 +976,7 @@ vint16m1_t test_vmadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vmadd_vx_i16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_tum( @@ -985,7 +985,7 @@ vint16m1_t test_vmadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t 
vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vmadd_vv_i16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_tum( @@ -994,7 +994,7 @@ vint16m2_t test_vmadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vmadd_vx_i16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_tum( @@ -1003,7 +1003,7 @@ vint16m2_t test_vmadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vmadd_vv_i16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_tum( @@ -1012,7 +1012,7 @@ vint16m4_t test_vmadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vmadd_vx_i16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_tum( @@ -1021,7 +1021,7 @@ vint16m4_t test_vmadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vmadd_vv_i16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_tum( @@ -1030,7 +1030,7 @@ 
vint16m8_t test_vmadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vmadd_vx_i16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tum( @@ -1039,7 +1039,7 @@ vint16m8_t test_vmadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vmadd_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tum( @@ -1048,7 +1048,7 @@ vint32mf2_t test_vmadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vmadd_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_tum( @@ -1057,7 +1057,7 @@ vint32mf2_t test_vmadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vmadd_vv_i32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_tum( @@ -1066,7 +1066,7 @@ vint32m1_t test_vmadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vmadd_vx_i32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m1_tum(mask, vd, rs1, vs2, vl); 
} // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_tum( @@ -1075,7 +1075,7 @@ vint32m1_t test_vmadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vmadd_vv_i32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_tum( @@ -1084,7 +1084,7 @@ vint32m2_t test_vmadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vmadd_vx_i32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_tum( @@ -1093,7 +1093,7 @@ vint32m2_t test_vmadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vmadd_vv_i32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_tum( @@ -1102,7 +1102,7 @@ vint32m4_t test_vmadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vmadd_vx_i32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_tum( @@ -1111,7 +1111,7 @@ vint32m4_t test_vmadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vmadd_vv_i32m8_tum(mask, vd, vs1, vs2, vl); + 
return __riscv_vmadd_vv_i32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_tum( @@ -1120,7 +1120,7 @@ vint32m8_t test_vmadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vmadd_vx_i32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_tum( @@ -1129,7 +1129,7 @@ vint32m8_t test_vmadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vmadd_vv_i64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_tum( @@ -1138,7 +1138,7 @@ vint64m1_t test_vmadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vmadd_vx_i64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_tum( @@ -1147,7 +1147,7 @@ vint64m1_t test_vmadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vmadd_vv_i64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_tum( @@ -1156,7 +1156,7 @@ vint64m2_t test_vmadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { 
- return vmadd_vx_i64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_tum( @@ -1165,7 +1165,7 @@ vint64m2_t test_vmadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vmadd_vv_i64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_tum( @@ -1174,7 +1174,7 @@ vint64m4_t test_vmadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vmadd_vx_i64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_tum( @@ -1183,7 +1183,7 @@ vint64m4_t test_vmadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vmadd_vv_i64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_tum( @@ -1192,7 +1192,7 @@ vint64m8_t test_vmadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vmadd_vx_i64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_tum( @@ -1201,7 +1201,7 @@ vint64m8_t test_vmadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_tum(vbool64_t mask, 
vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vmadd_vv_u8mf8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_tum( @@ -1210,7 +1210,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vmadd_vx_u8mf8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_tum( @@ -1219,7 +1219,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vmadd_vv_u8mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_tum( @@ -1228,7 +1228,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vmadd_vx_u8mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_tum( @@ -1237,7 +1237,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vmadd_vv_u8mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_tum( @@ -1246,7 +1246,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vmadd_vx_u8mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_tum( @@ -1255,7 +1255,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vmadd_vv_u8m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_tum( @@ -1264,7 +1264,7 @@ vuint8m1_t test_vmadd_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vmadd_vx_u8m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_tum( @@ -1273,7 +1273,7 @@ vuint8m1_t test_vmadd_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vmadd_vv_u8m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_tum( @@ -1282,7 +1282,7 @@ vuint8m2_t test_vmadd_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vmadd_vx_u8m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_tum( @@ -1291,7 +1291,7 @@ vuint8m2_t test_vmadd_vx_u8m2_tum(vbool4_t 
mask, vuint8m2_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vmadd_vv_u8m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_tum( @@ -1300,7 +1300,7 @@ vuint8m4_t test_vmadd_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vmadd_vx_u8m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_tum( @@ -1309,7 +1309,7 @@ vuint8m4_t test_vmadd_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vmadd_vv_u8m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_tum( @@ -1318,7 +1318,7 @@ vuint8m8_t test_vmadd_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vmadd_vx_u8m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_tum( @@ -1327,7 +1327,7 @@ vuint8m8_t test_vmadd_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vmadd_vv_u16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_tum( @@ -1336,7 
+1336,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vmadd_vx_u16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_tum( @@ -1345,7 +1345,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vmadd_vv_u16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_tum( @@ -1354,7 +1354,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vmadd_vx_u16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_tum( @@ -1363,7 +1363,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vmadd_vv_u16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_tum( @@ -1372,7 +1372,7 @@ vuint16m1_t test_vmadd_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vmadd_vx_u16m1_tum(mask, vd, rs1, vs2, vl); + return 
__riscv_vmadd_vx_u16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_tum( @@ -1381,7 +1381,7 @@ vuint16m1_t test_vmadd_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vmadd_vv_u16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_tum( @@ -1390,7 +1390,7 @@ vuint16m2_t test_vmadd_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vmadd_vx_u16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_tum( @@ -1399,7 +1399,7 @@ vuint16m2_t test_vmadd_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vmadd_vv_u16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_tum( @@ -1408,7 +1408,7 @@ vuint16m4_t test_vmadd_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vmadd_vx_u16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_tum( @@ -1417,7 +1417,7 @@ vuint16m4_t test_vmadd_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, 
size_t vl) { - return vmadd_vv_u16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_tum( @@ -1426,7 +1426,7 @@ vuint16m8_t test_vmadd_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vmadd_vx_u16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tum( @@ -1435,7 +1435,7 @@ vuint16m8_t test_vmadd_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vmadd_vv_u32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tum( @@ -1444,7 +1444,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vmadd_vx_u32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_tum( @@ -1453,7 +1453,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vmadd_vv_u32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_tum( @@ -1462,7 +1462,7 @@ vuint32m1_t test_vmadd_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m1_t test_vmadd_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vmadd_vx_u32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_tum( @@ -1471,7 +1471,7 @@ vuint32m1_t test_vmadd_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vmadd_vv_u32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_tum( @@ -1480,7 +1480,7 @@ vuint32m2_t test_vmadd_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vmadd_vx_u32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_tum( @@ -1489,7 +1489,7 @@ vuint32m2_t test_vmadd_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vmadd_vv_u32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_tum( @@ -1498,7 +1498,7 @@ vuint32m4_t test_vmadd_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vmadd_vx_u32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_tum( @@ -1507,7 +1507,7 @@ vuint32m4_t 
test_vmadd_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vmadd_vv_u32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_tum( @@ -1516,7 +1516,7 @@ vuint32m8_t test_vmadd_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vmadd_vx_u32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_tum( @@ -1525,7 +1525,7 @@ vuint32m8_t test_vmadd_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vmadd_vv_u64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_tum( @@ -1534,7 +1534,7 @@ vuint64m1_t test_vmadd_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vmadd_vx_u64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_tum( @@ -1543,7 +1543,7 @@ vuint64m1_t test_vmadd_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vmadd_vv_u64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m2_tum(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_tum( @@ -1552,7 +1552,7 @@ vuint64m2_t test_vmadd_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vmadd_vx_u64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_tum( @@ -1561,7 +1561,7 @@ vuint64m2_t test_vmadd_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vmadd_vv_u64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_tum( @@ -1570,7 +1570,7 @@ vuint64m4_t test_vmadd_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vmadd_vx_u64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_tum( @@ -1579,7 +1579,7 @@ vuint64m4_t test_vmadd_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vmadd_vv_u64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_tum( @@ -1588,7 +1588,7 @@ vuint64m8_t test_vmadd_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vmadd_vx_u64m8_tum(mask, vd, rs1, 
vs2, vl); + return __riscv_vmadd_vx_u64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_tumu( @@ -1597,7 +1597,7 @@ vuint64m8_t test_vmadd_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vmadd_vv_i8mf8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_tumu( @@ -1606,7 +1606,7 @@ vint8mf8_t test_vmadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vmadd_vx_i8mf8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_tumu( @@ -1615,7 +1615,7 @@ vint8mf8_t test_vmadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vmadd_vv_i8mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_tumu( @@ -1624,7 +1624,7 @@ vint8mf4_t test_vmadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vmadd_vx_i8mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_tumu( @@ -1633,7 +1633,7 @@ vint8mf4_t test_vmadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t 
vs1, vint8mf2_t vs2, size_t vl) { - return vmadd_vv_i8mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_tumu( @@ -1642,7 +1642,7 @@ vint8mf2_t test_vmadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vmadd_vx_i8mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_tumu( @@ -1651,7 +1651,7 @@ vint8mf2_t test_vmadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vmadd_vv_i8m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_tumu( @@ -1660,7 +1660,7 @@ vint8m1_t test_vmadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vmadd_vx_i8m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_tumu( @@ -1669,7 +1669,7 @@ vint8m1_t test_vmadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vmadd_vv_i8m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_tumu( @@ -1678,7 +1678,7 @@ vint8m2_t test_vmadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vmadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vmadd_vx_i8m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_tumu( @@ -1687,7 +1687,7 @@ vint8m2_t test_vmadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vmadd_vv_i8m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_tumu( @@ -1696,7 +1696,7 @@ vint8m4_t test_vmadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vmadd_vx_i8m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_tumu( @@ -1705,7 +1705,7 @@ vint8m4_t test_vmadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vmadd_vv_i8m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_tumu( @@ -1714,7 +1714,7 @@ vint8m8_t test_vmadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vmadd_vx_i8m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_tumu( @@ -1723,7 +1723,7 @@ vint8m8_t test_vmadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vmadd_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_tumu( @@ -1732,7 +1732,7 @@ vint16mf4_t test_vmadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vmadd_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_tumu( @@ -1741,7 +1741,7 @@ vint16mf4_t test_vmadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vmadd_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_tumu( @@ -1750,7 +1750,7 @@ vint16mf2_t test_vmadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vmadd_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_tumu( @@ -1759,7 +1759,7 @@ vint16mf2_t test_vmadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vmadd_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vmadd_vx_i16m1_tumu( @@ -1768,7 +1768,7 @@ vint16m1_t test_vmadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vmadd_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_tumu( @@ -1777,7 +1777,7 @@ vint16m1_t test_vmadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vmadd_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_tumu( @@ -1786,7 +1786,7 @@ vint16m2_t test_vmadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vmadd_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_tumu( @@ -1795,7 +1795,7 @@ vint16m2_t test_vmadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vmadd_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_tumu( @@ -1804,7 +1804,7 @@ vint16m4_t test_vmadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vmadd_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); + return 
__riscv_vmadd_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_tumu( @@ -1813,7 +1813,7 @@ vint16m4_t test_vmadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vmadd_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_tumu( @@ -1822,7 +1822,7 @@ vint16m8_t test_vmadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vmadd_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_tumu( @@ -1831,7 +1831,7 @@ vint16m8_t test_vmadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vmadd_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_tumu( @@ -1840,7 +1840,7 @@ vint32mf2_t test_vmadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vmadd_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_tumu( @@ -1849,7 +1849,7 @@ vint32mf2_t test_vmadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, 
vint32m1_t vs2, size_t vl) { - return vmadd_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_tumu( @@ -1858,7 +1858,7 @@ vint32m1_t test_vmadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vmadd_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_tumu( @@ -1867,7 +1867,7 @@ vint32m1_t test_vmadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vmadd_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_tumu( @@ -1876,7 +1876,7 @@ vint32m2_t test_vmadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vmadd_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_tumu( @@ -1885,7 +1885,7 @@ vint32m2_t test_vmadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vmadd_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_tumu( @@ -1894,7 +1894,7 @@ vint32m4_t test_vmadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m4_t test_vmadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vmadd_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_tumu( @@ -1903,7 +1903,7 @@ vint32m4_t test_vmadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vmadd_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_tumu( @@ -1912,7 +1912,7 @@ vint32m8_t test_vmadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vmadd_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_tumu( @@ -1921,7 +1921,7 @@ vint32m8_t test_vmadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vmadd_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_tumu( @@ -1930,7 +1930,7 @@ vint64m1_t test_vmadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vmadd_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_tumu( @@ -1939,7 +1939,7 @@ vint64m1_t 
test_vmadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vmadd_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_tumu( @@ -1948,7 +1948,7 @@ vint64m2_t test_vmadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vmadd_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_tumu( @@ -1957,7 +1957,7 @@ vint64m2_t test_vmadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vmadd_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_tumu( @@ -1966,7 +1966,7 @@ vint64m4_t test_vmadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vmadd_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_tumu( @@ -1975,7 +1975,7 @@ vint64m4_t test_vmadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vmadd_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_tumu( @@ -1984,7 +1984,7 @@ vint64m8_t test_vmadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vmadd_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_tumu( @@ -1993,7 +1993,7 @@ vint64m8_t test_vmadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vmadd_vv_u8mf8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_tumu( @@ -2002,7 +2002,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vmadd_vx_u8mf8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_tumu( @@ -2011,7 +2011,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vmadd_vv_u8mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_tumu( @@ -2020,7 +2020,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return 
vmadd_vx_u8mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_tumu( @@ -2029,7 +2029,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vmadd_vv_u8mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_tumu( @@ -2038,7 +2038,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vmadd_vx_u8mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_tumu( @@ -2047,7 +2047,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vmadd_vv_u8m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_tumu( @@ -2056,7 +2056,7 @@ vuint8m1_t test_vmadd_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vmadd_vx_u8m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_tumu( @@ -2065,7 +2065,7 @@ vuint8m1_t test_vmadd_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_tumu(vbool4_t 
mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vmadd_vv_u8m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_tumu( @@ -2074,7 +2074,7 @@ vuint8m2_t test_vmadd_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vmadd_vx_u8m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_tumu( @@ -2083,7 +2083,7 @@ vuint8m2_t test_vmadd_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vmadd_vv_u8m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_tumu( @@ -2092,7 +2092,7 @@ vuint8m4_t test_vmadd_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vmadd_vx_u8m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_tumu( @@ -2101,7 +2101,7 @@ vuint8m4_t test_vmadd_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vmadd_vv_u8m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_tumu( @@ -2110,7 +2110,7 @@ vuint8m8_t test_vmadd_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vmadd_vx_u8m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_tumu( @@ -2119,7 +2119,7 @@ vuint8m8_t test_vmadd_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vmadd_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_tumu( @@ -2128,7 +2128,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vmadd_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_tumu( @@ -2137,7 +2137,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vmadd_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_tumu( @@ -2146,7 +2146,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vmadd_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_tumu( @@ 
-2155,7 +2155,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vmadd_vv_u16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_tumu( @@ -2164,7 +2164,7 @@ vuint16m1_t test_vmadd_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vmadd_vx_u16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_tumu( @@ -2173,7 +2173,7 @@ vuint16m1_t test_vmadd_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vmadd_vv_u16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_tumu( @@ -2182,7 +2182,7 @@ vuint16m2_t test_vmadd_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vmadd_vx_u16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_tumu( @@ -2191,7 +2191,7 @@ vuint16m2_t test_vmadd_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vmadd_vv_u16m4_tumu(mask, vd, vs1, vs2, vl); + return 
__riscv_vmadd_vv_u16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_tumu( @@ -2200,7 +2200,7 @@ vuint16m4_t test_vmadd_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vmadd_vx_u16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_tumu( @@ -2209,7 +2209,7 @@ vuint16m4_t test_vmadd_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vmadd_vv_u16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_tumu( @@ -2218,7 +2218,7 @@ vuint16m8_t test_vmadd_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vmadd_vx_u16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_tumu( @@ -2227,7 +2227,7 @@ vuint16m8_t test_vmadd_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vmadd_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_tumu( @@ -2236,7 +2236,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, 
uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vmadd_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_tumu( @@ -2245,7 +2245,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vmadd_vv_u32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_tumu( @@ -2254,7 +2254,7 @@ vuint32m1_t test_vmadd_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vmadd_vx_u32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_tumu( @@ -2263,7 +2263,7 @@ vuint32m1_t test_vmadd_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vmadd_vv_u32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_tumu( @@ -2272,7 +2272,7 @@ vuint32m2_t test_vmadd_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vmadd_vx_u32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_tumu( @@ -2281,7 +2281,7 @@ vuint32m2_t test_vmadd_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vmadd_vv_u32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_tumu( @@ -2290,7 +2290,7 @@ vuint32m4_t test_vmadd_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vmadd_vx_u32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_tumu( @@ -2299,7 +2299,7 @@ vuint32m4_t test_vmadd_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vmadd_vv_u32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_tumu( @@ -2308,7 +2308,7 @@ vuint32m8_t test_vmadd_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vmadd_vx_u32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_tumu( @@ -2317,7 +2317,7 @@ vuint32m8_t test_vmadd_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vmadd_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_tumu( @@ -2326,7 
+2326,7 @@ vuint64m1_t test_vmadd_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vmadd_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_tumu( @@ -2335,7 +2335,7 @@ vuint64m1_t test_vmadd_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vmadd_vv_u64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_tumu( @@ -2344,7 +2344,7 @@ vuint64m2_t test_vmadd_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vmadd_vx_u64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_tumu( @@ -2353,7 +2353,7 @@ vuint64m2_t test_vmadd_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vmadd_vv_u64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_tumu( @@ -2362,7 +2362,7 @@ vuint64m4_t test_vmadd_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vmadd_vx_u64m4_tumu(mask, vd, rs1, vs2, vl); + return 
__riscv_vmadd_vx_u64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_tumu( @@ -2371,7 +2371,7 @@ vuint64m4_t test_vmadd_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vmadd_vv_u64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_tumu( @@ -2380,7 +2380,7 @@ vuint64m8_t test_vmadd_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vmadd_vx_u64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_mu( @@ -2389,7 +2389,7 @@ vuint64m8_t test_vmadd_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vmadd_vv_i8mf8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_mu( @@ -2398,7 +2398,7 @@ vint8mf8_t test_vmadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vmadd_vx_i8mf8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_mu( @@ -2407,7 +2407,7 @@ vint8mf8_t test_vmadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { 
- return vmadd_vv_i8mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_mu( @@ -2416,7 +2416,7 @@ vint8mf4_t test_vmadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vmadd_vx_i8mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_mu( @@ -2425,7 +2425,7 @@ vint8mf4_t test_vmadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vmadd_vv_i8mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_mu( @@ -2434,7 +2434,7 @@ vint8mf2_t test_vmadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vmadd_vx_i8mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_mu( @@ -2443,7 +2443,7 @@ vint8mf2_t test_vmadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vmadd_vv_i8m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_mu( @@ -2452,7 +2452,7 @@ vint8m1_t test_vmadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t 
vs2, size_t vl) { - return vmadd_vx_i8m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_mu( @@ -2461,7 +2461,7 @@ vint8m1_t test_vmadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vmadd_vv_i8m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_mu( @@ -2470,7 +2470,7 @@ vint8m2_t test_vmadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vmadd_vx_i8m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_mu( @@ -2479,7 +2479,7 @@ vint8m2_t test_vmadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vmadd_vv_i8m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_mu( @@ -2488,7 +2488,7 @@ vint8m4_t test_vmadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vmadd_vx_i8m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_mu( @@ -2497,7 +2497,7 @@ vint8m4_t test_vmadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, 
size_t vl) { - return vmadd_vv_i8m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i8m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_mu( @@ -2506,7 +2506,7 @@ vint8m8_t test_vmadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vmadd_vx_i8m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i8m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_mu( @@ -2515,7 +2515,7 @@ vint8m8_t test_vmadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vmadd_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_mu( @@ -2524,7 +2524,7 @@ vint16mf4_t test_vmadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vmadd_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_mu( @@ -2533,7 +2533,7 @@ vint16mf4_t test_vmadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vmadd_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_mu( @@ -2542,7 +2542,7 @@ vint16mf2_t test_vmadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_mu(vbool32_t 
mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vmadd_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_mu( @@ -2551,7 +2551,7 @@ vint16mf2_t test_vmadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vmadd_vv_i16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_mu( @@ -2560,7 +2560,7 @@ vint16m1_t test_vmadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vmadd_vx_i16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_mu( @@ -2569,7 +2569,7 @@ vint16m1_t test_vmadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vmadd_vv_i16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_mu( @@ -2578,7 +2578,7 @@ vint16m2_t test_vmadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vmadd_vx_i16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_mu( @@ -2587,7 +2587,7 @@ vint16m2_t test_vmadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m4_t test_vmadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vmadd_vv_i16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_mu( @@ -2596,7 +2596,7 @@ vint16m4_t test_vmadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vmadd_vx_i16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_mu( @@ -2605,7 +2605,7 @@ vint16m4_t test_vmadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vmadd_vv_i16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_mu( @@ -2614,7 +2614,7 @@ vint16m8_t test_vmadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vmadd_vx_i16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_mu( @@ -2623,7 +2623,7 @@ vint16m8_t test_vmadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vmadd_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_mu( @@ -2632,7 +2632,7 @@ vint32mf2_t test_vmadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, 
vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vmadd_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_mu( @@ -2641,7 +2641,7 @@ vint32mf2_t test_vmadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vmadd_vv_i32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_mu( @@ -2650,7 +2650,7 @@ vint32m1_t test_vmadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vmadd_vx_i32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_mu( @@ -2659,7 +2659,7 @@ vint32m1_t test_vmadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vmadd_vv_i32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_mu( @@ -2668,7 +2668,7 @@ vint32m2_t test_vmadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vmadd_vx_i32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_mu( @@ -2677,7 +2677,7 @@ vint32m2_t 
test_vmadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vmadd_vv_i32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_mu( @@ -2686,7 +2686,7 @@ vint32m4_t test_vmadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vmadd_vx_i32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_mu( @@ -2695,7 +2695,7 @@ vint32m4_t test_vmadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vmadd_vv_i32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_mu( @@ -2704,7 +2704,7 @@ vint32m8_t test_vmadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vmadd_vx_i32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_mu( @@ -2713,7 +2713,7 @@ vint32m8_t test_vmadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vmadd_vv_i64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_mu( 
@@ -2722,7 +2722,7 @@ vint64m1_t test_vmadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vmadd_vx_i64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_mu( @@ -2731,7 +2731,7 @@ vint64m1_t test_vmadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vmadd_vv_i64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_mu( @@ -2740,7 +2740,7 @@ vint64m2_t test_vmadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vmadd_vx_i64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_mu( @@ -2749,7 +2749,7 @@ vint64m2_t test_vmadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vmadd_vv_i64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_mu( @@ -2758,7 +2758,7 @@ vint64m4_t test_vmadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vmadd_vx_i64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m4_mu(mask, vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_mu( @@ -2767,7 +2767,7 @@ vint64m4_t test_vmadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vmadd_vv_i64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_i64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_mu( @@ -2776,7 +2776,7 @@ vint64m8_t test_vmadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vmadd_vx_i64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_i64m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_mu( @@ -2785,7 +2785,7 @@ vint64m8_t test_vmadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vmadd_vv_u8mf8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_mu( @@ -2794,7 +2794,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vmadd_vx_u8mf8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_mu( @@ -2803,7 +2803,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vmadd_vv_u8mf4_mu(mask, vd, vs1, vs2, vl); + return 
__riscv_vmadd_vv_u8mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_mu( @@ -2812,7 +2812,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vmadd_vx_u8mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_mu( @@ -2821,7 +2821,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vmadd_vv_u8mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_mu( @@ -2830,7 +2830,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vmadd_vx_u8mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_mu( @@ -2839,7 +2839,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vmadd_vv_u8m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_mu( @@ -2848,7 +2848,7 @@ vuint8m1_t test_vmadd_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return 
vmadd_vx_u8m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_mu( @@ -2857,7 +2857,7 @@ vuint8m1_t test_vmadd_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vmadd_vv_u8m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_mu( @@ -2866,7 +2866,7 @@ vuint8m2_t test_vmadd_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vmadd_vx_u8m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_mu( @@ -2875,7 +2875,7 @@ vuint8m2_t test_vmadd_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vmadd_vv_u8m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_mu( @@ -2884,7 +2884,7 @@ vuint8m4_t test_vmadd_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vmadd_vx_u8m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_mu( @@ -2893,7 +2893,7 @@ vuint8m4_t test_vmadd_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t 
vl) { - return vmadd_vv_u8m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u8m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_mu( @@ -2902,7 +2902,7 @@ vuint8m8_t test_vmadd_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vmadd_vx_u8m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u8m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_mu( @@ -2911,7 +2911,7 @@ vuint8m8_t test_vmadd_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vmadd_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_mu( @@ -2920,7 +2920,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vmadd_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_mu( @@ -2929,7 +2929,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vmadd_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_mu( @@ -2938,7 +2938,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vmadd_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vmadd_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_mu( @@ -2947,7 +2947,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vmadd_vv_u16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_mu( @@ -2956,7 +2956,7 @@ vuint16m1_t test_vmadd_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vmadd_vx_u16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_mu( @@ -2965,7 +2965,7 @@ vuint16m1_t test_vmadd_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vmadd_vv_u16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_mu( @@ -2974,7 +2974,7 @@ vuint16m2_t test_vmadd_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vmadd_vx_u16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_mu( @@ -2983,7 +2983,7 @@ vuint16m2_t test_vmadd_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, 
uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vmadd_vv_u16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_mu( @@ -2992,7 +2992,7 @@ vuint16m4_t test_vmadd_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vmadd_vx_u16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_mu( @@ -3001,7 +3001,7 @@ vuint16m4_t test_vmadd_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vmadd_vv_u16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_mu( @@ -3010,7 +3010,7 @@ vuint16m8_t test_vmadd_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vmadd_vx_u16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_mu( @@ -3019,7 +3019,7 @@ vuint16m8_t test_vmadd_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vmadd_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_mu( @@ -3028,7 +3028,7 @@ 
vuint32mf2_t test_vmadd_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vmadd_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_mu( @@ -3037,7 +3037,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vmadd_vv_u32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_mu( @@ -3046,7 +3046,7 @@ vuint32m1_t test_vmadd_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vmadd_vx_u32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_mu( @@ -3055,7 +3055,7 @@ vuint32m1_t test_vmadd_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vmadd_vv_u32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_mu( @@ -3064,7 +3064,7 @@ vuint32m2_t test_vmadd_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vmadd_vx_u32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m2_mu(mask, vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_mu( @@ -3073,7 +3073,7 @@ vuint32m2_t test_vmadd_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vmadd_vv_u32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_mu( @@ -3082,7 +3082,7 @@ vuint32m4_t test_vmadd_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vmadd_vx_u32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_mu( @@ -3091,7 +3091,7 @@ vuint32m4_t test_vmadd_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vmadd_vv_u32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_mu( @@ -3100,7 +3100,7 @@ vuint32m8_t test_vmadd_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vmadd_vx_u32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_mu( @@ -3109,7 +3109,7 @@ vuint32m8_t test_vmadd_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vmadd_vv_u64m1_mu(mask, vd, vs1, vs2, vl); + return 
__riscv_vmadd_vv_u64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_mu( @@ -3118,7 +3118,7 @@ vuint64m1_t test_vmadd_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vmadd_vx_u64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_mu( @@ -3127,7 +3127,7 @@ vuint64m1_t test_vmadd_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vmadd_vv_u64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_mu( @@ -3136,7 +3136,7 @@ vuint64m2_t test_vmadd_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vmadd_vx_u64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_mu( @@ -3145,7 +3145,7 @@ vuint64m2_t test_vmadd_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vmadd_vv_u64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_mu( @@ -3154,7 +3154,7 @@ vuint64m4_t test_vmadd_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - 
return vmadd_vx_u64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_mu( @@ -3163,7 +3163,7 @@ vuint64m4_t test_vmadd_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vmadd_vv_u64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vmadd_vv_u64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_mu( @@ -3172,6 +3172,6 @@ vuint64m8_t test_vmadd_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vmadd_vx_u64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vmadd_vx_u64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmax.c index a1a92638e816..a5fd9fbb53d0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmax.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmax_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vmax_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vmax_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmax_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vmax_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vmax_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmax_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vmax_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vmax_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmax_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m1_tu( @@ 
-75,7 +75,7 @@ vint8m1_t test_vmax_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vmax_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmax_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vmax_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vmax_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmax_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vmax_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vmax_vx_i8m4_tu(vint8m4_t maskedoff, 
vint8m4_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmax_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vmax_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vmax_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmax_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vmax_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vmax_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmax_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vmax_vv_i16mf2_tu(vint16mf2_t maskedoff, 
vint16mf2_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vmax_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmax_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vmax_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vmax_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmax_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vmax_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vmax_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmax_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vmax_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vmax_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmax_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vmax_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vmax_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmax_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vmax_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vmax_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmax_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vmax_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vmax_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmax_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vmax_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vmax_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m4_t test_vmax_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmax_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vmax_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vmax_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmax_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vmax_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vmax_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmax_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vmax_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vmax_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vmax_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmax_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vmax_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vmax_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmax_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vmax_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vmax_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vmax_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmax_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vmax_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_tum( @@ -408,7 +408,7 @@ vint64m8_t test_vmax_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmax_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vmax_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_tum( @@ -426,7 +426,7 @@ vint8mf8_t test_vmax_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmax_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_tum( @@ -435,7 +435,7 @@ vint8mf4_t test_vmax_vv_i8mf4_tum(vbool32_t mask, 
vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_tum( @@ -444,7 +444,7 @@ vint8mf4_t test_vmax_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmax_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_tum( @@ -453,7 +453,7 @@ vint8mf2_t test_vmax_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m1_tum( @@ -462,7 +462,7 @@ vint8mf2_t test_vmax_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmax_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vmax_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmax_vv_i8m2_tum( @@ -480,7 +480,7 @@ vint8m1_t test_vmax_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmax_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m2_tum( @@ -489,7 +489,7 @@ vint8m2_t test_vmax_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m4_tum( @@ -498,7 +498,7 @@ vint8m2_t test_vmax_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmax_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m4_tum( @@ -507,7 +507,7 @@ vint8m4_t test_vmax_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m8_tum( @@ -516,7 +516,7 @@ vint8m4_t test_vmax_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmax_vv_i8m8_tum(mask, 
maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m8_tum( @@ -525,7 +525,7 @@ vint8m8_t test_vmax_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_tum( @@ -534,7 +534,7 @@ vint8m8_t test_vmax_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmax_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_tum( @@ -543,7 +543,7 @@ vint16mf4_t test_vmax_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_tum( @@ -552,7 +552,7 @@ vint16mf4_t test_vmax_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmax_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_tum( @@ -561,7 +561,7 @@ vint16mf2_t test_vmax_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf2_t test_vmax_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m1_tum( @@ -570,7 +570,7 @@ vint16mf2_t test_vmax_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmax_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m1_tum( @@ -579,7 +579,7 @@ vint16m1_t test_vmax_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m2_tum( @@ -588,7 +588,7 @@ vint16m1_t test_vmax_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmax_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m2_tum( @@ -597,7 +597,7 @@ vint16m2_t test_vmax_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m4_tum( 
@@ -606,7 +606,7 @@ vint16m2_t test_vmax_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmax_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m4_tum( @@ -615,7 +615,7 @@ vint16m4_t test_vmax_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m8_tum( @@ -624,7 +624,7 @@ vint16m4_t test_vmax_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmax_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m8_tum( @@ -633,7 +633,7 @@ vint16m8_t test_vmax_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tum( @@ -642,7 +642,7 @@ vint16m8_t test_vmax_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmax_vv_i32mf2_tum(mask, 
maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tum( @@ -651,7 +651,7 @@ vint32mf2_t test_vmax_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m1_tum( @@ -660,7 +660,7 @@ vint32mf2_t test_vmax_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmax_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m1_tum( @@ -669,7 +669,7 @@ vint32m1_t test_vmax_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m2_tum( @@ -678,7 +678,7 @@ vint32m1_t test_vmax_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmax_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m2_tum( @@ -687,7 +687,7 @@ vint32m2_t test_vmax_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m2_t test_vmax_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m4_tum( @@ -696,7 +696,7 @@ vint32m2_t test_vmax_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmax_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m4_tum( @@ -705,7 +705,7 @@ vint32m4_t test_vmax_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m8_tum( @@ -714,7 +714,7 @@ vint32m4_t test_vmax_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmax_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m8_tum( @@ -723,7 +723,7 @@ vint32m8_t test_vmax_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m1_tum( @@ -732,7 
+732,7 @@ vint32m8_t test_vmax_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmax_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m1_tum( @@ -741,7 +741,7 @@ vint64m1_t test_vmax_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m2_tum( @@ -750,7 +750,7 @@ vint64m1_t test_vmax_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmax_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m2_tum( @@ -759,7 +759,7 @@ vint64m2_t test_vmax_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m4_tum( @@ -768,7 +768,7 @@ vint64m2_t test_vmax_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmax_vv_i64m4_tum(mask, maskedoff, op1, 
op2, vl); + return __riscv_vmax_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m4_tum( @@ -777,7 +777,7 @@ vint64m4_t test_vmax_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m8_tum( @@ -786,7 +786,7 @@ vint64m4_t test_vmax_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmax_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m8_tum( @@ -795,7 +795,7 @@ vint64m8_t test_vmax_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_tumu( @@ -804,7 +804,7 @@ vint64m8_t test_vmax_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmax_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vmax_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vmax_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_tumu( @@ -822,7 +822,7 @@ vint8mf8_t test_vmax_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmax_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_tumu( @@ -831,7 +831,7 @@ vint8mf4_t test_vmax_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_tumu( @@ -840,7 +840,7 @@ vint8mf4_t test_vmax_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmax_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_tumu( @@ -849,7 +849,7 @@ vint8mf2_t test_vmax_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m1_tumu( 
@@ -858,7 +858,7 @@ vint8mf2_t test_vmax_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmax_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m1_tumu( @@ -867,7 +867,7 @@ vint8m1_t test_vmax_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m2_tumu( @@ -876,7 +876,7 @@ vint8m1_t test_vmax_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmax_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m2_tumu( @@ -885,7 +885,7 @@ vint8m2_t test_vmax_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m4_tumu( @@ -894,7 +894,7 @@ vint8m2_t test_vmax_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmax_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vmax_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m4_tumu( @@ -903,7 +903,7 @@ vint8m4_t test_vmax_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m8_tumu( @@ -912,7 +912,7 @@ vint8m4_t test_vmax_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmax_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m8_tumu( @@ -921,7 +921,7 @@ vint8m8_t test_vmax_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_tumu( @@ -930,7 +930,7 @@ vint8m8_t test_vmax_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmax_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_tumu( @@ -939,7 +939,7 @@ vint16mf4_t test_vmax_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vmax_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_tumu( @@ -948,7 +948,7 @@ vint16mf4_t test_vmax_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmax_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_tumu( @@ -957,7 +957,7 @@ vint16mf2_t test_vmax_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m1_tumu( @@ -966,7 +966,7 @@ vint16mf2_t test_vmax_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmax_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m1_tumu( @@ -975,7 +975,7 @@ vint16m1_t test_vmax_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmax_vv_i16m2_tumu( @@ -984,7 +984,7 @@ vint16m1_t test_vmax_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmax_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m2_tumu( @@ -993,7 +993,7 @@ vint16m2_t test_vmax_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m4_tumu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vmax_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmax_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m4_tumu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vmax_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m8_tumu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vmax_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t 
vl) { - return vmax_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m8_tumu( @@ -1029,7 +1029,7 @@ vint16m8_t test_vmax_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_tumu( @@ -1038,7 +1038,7 @@ vint16m8_t test_vmax_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmax_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_tumu( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vmax_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m1_tumu( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vmax_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmax_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m1_tumu( @@ -1065,7 +1065,7 @@ vint32m1_t test_vmax_vv_i32m1_tumu(vbool32_t mask, 
vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m2_tumu( @@ -1074,7 +1074,7 @@ vint32m1_t test_vmax_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmax_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m2_tumu( @@ -1083,7 +1083,7 @@ vint32m2_t test_vmax_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m4_tumu( @@ -1092,7 +1092,7 @@ vint32m2_t test_vmax_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmax_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m4_tumu( @@ -1101,7 +1101,7 @@ vint32m4_t test_vmax_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmax_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m8_tumu( @@ -1110,7 +1110,7 @@ vint32m4_t test_vmax_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmax_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m8_tumu( @@ -1119,7 +1119,7 @@ vint32m8_t test_vmax_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m1_tumu( @@ -1128,7 +1128,7 @@ vint32m8_t test_vmax_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmax_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m1_tumu( @@ -1137,7 +1137,7 @@ vint64m1_t test_vmax_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m2_tumu( @@ -1146,7 +1146,7 @@ vint64m1_t test_vmax_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vmax_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmax_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m2_tumu( @@ -1155,7 +1155,7 @@ vint64m2_t test_vmax_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m4_tumu( @@ -1164,7 +1164,7 @@ vint64m2_t test_vmax_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmax_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m4_tumu( @@ -1173,7 +1173,7 @@ vint64m4_t test_vmax_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m8_tumu( @@ -1182,7 +1182,7 @@ vint64m4_t test_vmax_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmax_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmax_vx_i64m8_tumu( @@ -1191,7 +1191,7 @@ vint64m8_t test_vmax_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_mu( @@ -1200,7 +1200,7 @@ vint64m8_t test_vmax_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmax_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vmax_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_mu( @@ -1218,7 +1218,7 @@ vint8mf8_t test_vmax_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmax_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_mu( @@ -1227,7 +1227,7 @@ vint8mf4_t test_vmax_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return 
vmax_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_mu( @@ -1236,7 +1236,7 @@ vint8mf4_t test_vmax_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmax_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_mu( @@ -1245,7 +1245,7 @@ vint8mf2_t test_vmax_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m1_mu( @@ -1254,7 +1254,7 @@ vint8mf2_t test_vmax_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmax_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m1_mu( @@ -1263,7 +1263,7 @@ vint8m1_t test_vmax_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m2_mu( @@ -1272,7 +1272,7 @@ vint8m1_t test_vmax_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vmax_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmax_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m2_mu( @@ -1281,7 +1281,7 @@ vint8m2_t test_vmax_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m4_mu( @@ -1290,7 +1290,7 @@ vint8m2_t test_vmax_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmax_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m4_mu( @@ -1299,7 +1299,7 @@ vint8m4_t test_vmax_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i8m8_mu( @@ -1308,7 +1308,7 @@ vint8m4_t test_vmax_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmax_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m8_mu( @@ -1317,7 +1317,7 @@ vint8m8_t test_vmax_vv_i8m8_mu(vbool1_t mask, 
vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmax_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_mu( @@ -1326,7 +1326,7 @@ vint8m8_t test_vmax_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmax_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_mu( @@ -1335,7 +1335,7 @@ vint16mf4_t test_vmax_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_mu( @@ -1344,7 +1344,7 @@ vint16mf4_t test_vmax_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmax_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_mu( @@ -1353,7 +1353,7 @@ vint16mf2_t test_vmax_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmax_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m1_mu( @@ -1362,7 +1362,7 @@ vint16mf2_t test_vmax_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmax_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m1_mu( @@ -1371,7 +1371,7 @@ vint16m1_t test_vmax_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m2_mu( @@ -1380,7 +1380,7 @@ vint16m1_t test_vmax_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmax_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m2_mu( @@ -1389,7 +1389,7 @@ vint16m2_t test_vmax_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m4_mu( @@ -1398,7 +1398,7 @@ vint16m2_t test_vmax_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vv_i16m4_mu(vbool4_t mask, vint16m4_t 
maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmax_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m4_mu( @@ -1407,7 +1407,7 @@ vint16m4_t test_vmax_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m8_mu( @@ -1416,7 +1416,7 @@ vint16m4_t test_vmax_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmax_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m8_mu( @@ -1425,7 +1425,7 @@ vint16m8_t test_vmax_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmax_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_mu( @@ -1434,7 +1434,7 @@ vint16m8_t test_vmax_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmax_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_mu( @@ -1443,7 +1443,7 @@ vint32mf2_t test_vmax_vv_i32mf2_mu(vbool64_t 
mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m1_mu( @@ -1452,7 +1452,7 @@ vint32mf2_t test_vmax_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmax_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m1_mu( @@ -1461,7 +1461,7 @@ vint32m1_t test_vmax_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m2_mu( @@ -1470,7 +1470,7 @@ vint32m1_t test_vmax_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmax_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m2_mu( @@ -1479,7 +1479,7 @@ vint32m2_t test_vmax_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m2_mu(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m4_mu( @@ -1488,7 +1488,7 @@ vint32m2_t test_vmax_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmax_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m4_mu( @@ -1497,7 +1497,7 @@ vint32m4_t test_vmax_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m8_mu( @@ -1506,7 +1506,7 @@ vint32m4_t test_vmax_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmax_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m8_mu( @@ -1515,7 +1515,7 @@ vint32m8_t test_vmax_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmax_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m1_mu( @@ -1524,7 +1524,7 @@ vint32m8_t test_vmax_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, 
size_t vl) { - return vmax_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m1_mu( @@ -1533,7 +1533,7 @@ vint64m1_t test_vmax_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m2_mu( @@ -1542,7 +1542,7 @@ vint64m1_t test_vmax_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmax_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m2_mu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vmax_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m4_mu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vmax_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmax_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m4_mu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vmax_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m8_mu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vmax_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmax_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m8_mu( @@ -1587,6 +1587,6 @@ vint64m8_t test_vmax_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmax_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmax_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmaxu.c index d4e93d710621..df457248cb0a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmaxu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmaxu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmaxu_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vmaxu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_tu( @@ -30,7 +30,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmaxu_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_tu( @@ -39,7 +39,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_tu( @@ -48,7 +48,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmaxu_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_tu( @@ -57,7 +57,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_tu( @@ -66,7 +66,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vmaxu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmaxu_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vmaxu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_tu( @@ -84,7 +84,7 @@ vuint8m1_t test_vmaxu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmaxu_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_tu( @@ -93,7 +93,7 @@ vuint8m2_t test_vmaxu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_tu( @@ -102,7 +102,7 @@ vuint8m2_t test_vmaxu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmaxu_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_tu( @@ -111,7 +111,7 @@ vuint8m4_t test_vmaxu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vmaxu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vmaxu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmaxu_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_tu( @@ -129,7 +129,7 @@ vuint8m8_t test_vmaxu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_tu( @@ -138,7 +138,7 @@ vuint8m8_t test_vmaxu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmaxu_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_tu( @@ -147,7 +147,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_tu( @@ -156,7 +156,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf2_t test_vmaxu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmaxu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_tu( @@ -165,7 +165,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_tu( @@ -174,7 +174,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmaxu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_tu( @@ -183,7 +183,7 @@ vuint16m1_t test_vmaxu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_tu( @@ -192,7 +192,7 @@ vuint16m1_t test_vmaxu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmaxu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_tu( @@ -201,7 +201,7 @@ vuint16m2_t test_vmaxu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_tu( @@ -210,7 +210,7 @@ vuint16m2_t test_vmaxu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmaxu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_tu( @@ -219,7 +219,7 @@ vuint16m4_t test_vmaxu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_tu( @@ -228,7 +228,7 @@ vuint16m4_t test_vmaxu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmaxu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_tu( @@ -237,7 +237,7 @@ vuint16m8_t test_vmaxu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tu( @@ -246,7 +246,7 @@ vuint16m8_t test_vmaxu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t 
op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmaxu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tu( @@ -255,7 +255,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_tu( @@ -264,7 +264,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmaxu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_tu( @@ -273,7 +273,7 @@ vuint32m1_t test_vmaxu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_tu( @@ -282,7 +282,7 @@ vuint32m1_t test_vmaxu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmaxu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_tu( @@ -291,7 +291,7 @@ vuint32m2_t 
test_vmaxu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_tu( @@ -300,7 +300,7 @@ vuint32m2_t test_vmaxu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmaxu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_tu( @@ -309,7 +309,7 @@ vuint32m4_t test_vmaxu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_tu( @@ -318,7 +318,7 @@ vuint32m4_t test_vmaxu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmaxu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_tu( @@ -327,7 +327,7 @@ vuint32m8_t test_vmaxu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_tu( @@ -336,7 +336,7 @@ 
vuint32m8_t test_vmaxu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmaxu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_tu( @@ -345,7 +345,7 @@ vuint64m1_t test_vmaxu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_tu( @@ -354,7 +354,7 @@ vuint64m1_t test_vmaxu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmaxu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_tu( @@ -363,7 +363,7 @@ vuint64m2_t test_vmaxu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_tu( @@ -372,7 +372,7 @@ vuint64m2_t test_vmaxu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmaxu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_tu( @@ 
-381,7 +381,7 @@ vuint64m4_t test_vmaxu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_tu( @@ -390,7 +390,7 @@ vuint64m4_t test_vmaxu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmaxu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m8_t test_vmaxu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t test_vmaxu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmaxu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_tum( @@ -417,7 +417,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf8_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_tum( @@ -426,7 +426,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmaxu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_tum( @@ -435,7 +435,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_tum( @@ -444,7 +444,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmaxu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_tum( @@ -453,7 +453,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_tum( @@ -462,7 +462,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vv_u8m1_tum(vbool8_t mask, 
vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmaxu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_tum( @@ -471,7 +471,7 @@ vuint8m1_t test_vmaxu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_tum( @@ -480,7 +480,7 @@ vuint8m1_t test_vmaxu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmaxu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_tum( @@ -489,7 +489,7 @@ vuint8m2_t test_vmaxu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_tum( @@ -498,7 +498,7 @@ vuint8m2_t test_vmaxu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmaxu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_tum( @@ -507,7 +507,7 @@ vuint8m4_t 
test_vmaxu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_tum( @@ -516,7 +516,7 @@ vuint8m4_t test_vmaxu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmaxu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_tum( @@ -525,7 +525,7 @@ vuint8m8_t test_vmaxu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_tum( @@ -534,7 +534,7 @@ vuint8m8_t test_vmaxu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmaxu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_tum( @@ -543,7 +543,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf4_tum(mask, maskedoff, 
op1, op2, vl); + return __riscv_vmaxu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_tum( @@ -552,7 +552,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmaxu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_tum( @@ -561,7 +561,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_tum( @@ -570,7 +570,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmaxu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_tum( @@ -579,7 +579,7 @@ vuint16m1_t test_vmaxu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_tum( @@ -588,7 +588,7 @@ vuint16m1_t test_vmaxu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmaxu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_tum( @@ -597,7 +597,7 @@ vuint16m2_t test_vmaxu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_tum( @@ -606,7 +606,7 @@ vuint16m2_t test_vmaxu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmaxu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_tum( @@ -615,7 +615,7 @@ vuint16m4_t test_vmaxu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_tum( @@ -624,7 +624,7 @@ vuint16m4_t test_vmaxu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmaxu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m8_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_tum( @@ -633,7 +633,7 @@ vuint16m8_t test_vmaxu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tum( @@ -642,7 +642,7 @@ vuint16m8_t test_vmaxu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmaxu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tum( @@ -651,7 +651,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_tum( @@ -660,7 +660,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmaxu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_tum( @@ -669,7 +669,7 @@ vuint32m1_t test_vmaxu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vmaxu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_tum( @@ -678,7 +678,7 @@ vuint32m1_t test_vmaxu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmaxu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_tum( @@ -687,7 +687,7 @@ vuint32m2_t test_vmaxu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_tum( @@ -696,7 +696,7 @@ vuint32m2_t test_vmaxu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmaxu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_tum( @@ -705,7 +705,7 @@ vuint32m4_t test_vmaxu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmaxu_vv_u32m8_tum( @@ -714,7 +714,7 @@ vuint32m4_t test_vmaxu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmaxu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_tum( @@ -723,7 +723,7 @@ vuint32m8_t test_vmaxu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_tum( @@ -732,7 +732,7 @@ vuint32m8_t test_vmaxu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmaxu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_tum( @@ -741,7 +741,7 @@ vuint64m1_t test_vmaxu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_tum( @@ -750,7 +750,7 @@ vuint64m1_t test_vmaxu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, 
vuint64m2_t op2, size_t vl) { - return vmaxu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_tum( @@ -759,7 +759,7 @@ vuint64m2_t test_vmaxu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_tum( @@ -768,7 +768,7 @@ vuint64m2_t test_vmaxu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmaxu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_tum( @@ -777,7 +777,7 @@ vuint64m4_t test_vmaxu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_tum( @@ -786,7 +786,7 @@ vuint64m4_t test_vmaxu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmaxu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m8_t 
test_vmaxu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vmaxu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmaxu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_tumu( @@ -813,7 +813,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_tumu( @@ -822,7 +822,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmaxu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_tumu( @@ -831,7 +831,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return 
vmaxu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_tumu( @@ -840,7 +840,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmaxu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_tumu( @@ -849,7 +849,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_tumu( @@ -858,7 +858,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmaxu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_tumu( @@ -867,7 +867,7 @@ vuint8m1_t test_vmaxu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_tumu( @@ -876,7 +876,7 @@ vuint8m1_t test_vmaxu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, 
vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmaxu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_tumu( @@ -885,7 +885,7 @@ vuint8m2_t test_vmaxu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_tumu( @@ -894,7 +894,7 @@ vuint8m2_t test_vmaxu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmaxu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_tumu( @@ -903,7 +903,7 @@ vuint8m4_t test_vmaxu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_tumu( @@ -912,7 +912,7 @@ vuint8m4_t test_vmaxu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmaxu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m8_tumu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_tumu( @@ -921,7 +921,7 @@ vuint8m8_t test_vmaxu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_tumu( @@ -930,7 +930,7 @@ vuint8m8_t test_vmaxu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmaxu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_tumu( @@ -939,7 +939,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_tumu( @@ -948,7 +948,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmaxu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_tumu( @@ -957,7 +957,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vmaxu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_tumu( @@ -966,7 +966,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmaxu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_tumu( @@ -975,7 +975,7 @@ vuint16m1_t test_vmaxu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_tumu( @@ -984,7 +984,7 @@ vuint16m1_t test_vmaxu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmaxu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_tumu( @@ -993,7 +993,7 @@ vuint16m2_t test_vmaxu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_tumu( @@ -1002,7 +1002,7 @@ vuint16m2_t test_vmaxu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmaxu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_tumu( @@ -1011,7 +1011,7 @@ vuint16m4_t test_vmaxu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_tumu( @@ -1020,7 +1020,7 @@ vuint16m4_t test_vmaxu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmaxu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_tumu( @@ -1029,7 +1029,7 @@ vuint16m8_t test_vmaxu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_tumu( @@ -1038,7 +1038,7 @@ vuint16m8_t test_vmaxu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2_tumu(vbool64_t mask, 
vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmaxu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_tumu( @@ -1047,7 +1047,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_tumu( @@ -1056,7 +1056,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmaxu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_tumu( @@ -1065,7 +1065,7 @@ vuint32m1_t test_vmaxu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_tumu( @@ -1074,7 +1074,7 @@ vuint32m1_t test_vmaxu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmaxu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmaxu_vx_u32m2_tumu( @@ -1083,7 +1083,7 @@ vuint32m2_t test_vmaxu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_tumu( @@ -1092,7 +1092,7 @@ vuint32m2_t test_vmaxu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmaxu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_tumu( @@ -1101,7 +1101,7 @@ vuint32m4_t test_vmaxu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_tumu( @@ -1110,7 +1110,7 @@ vuint32m4_t test_vmaxu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmaxu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_tumu( @@ -1119,7 +1119,7 @@ vuint32m8_t test_vmaxu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, 
vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_tumu( @@ -1128,7 +1128,7 @@ vuint32m8_t test_vmaxu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmaxu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_tumu( @@ -1137,7 +1137,7 @@ vuint64m1_t test_vmaxu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_tumu( @@ -1146,7 +1146,7 @@ vuint64m1_t test_vmaxu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmaxu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_tumu( @@ -1155,7 +1155,7 @@ vuint64m2_t test_vmaxu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_tumu( @@ -1164,7 
+1164,7 @@ vuint64m2_t test_vmaxu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmaxu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_tumu( @@ -1173,7 +1173,7 @@ vuint64m4_t test_vmaxu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_tumu( @@ -1182,7 +1182,7 @@ vuint64m4_t test_vmaxu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmaxu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m8_t test_vmaxu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t test_vmaxu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, 
size_t vl) { - return vmaxu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_mu( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_mu( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmaxu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_mu( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_mu( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmaxu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_mu( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t 
maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_mu( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmaxu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_mu( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vmaxu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_mu( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vmaxu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmaxu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_mu( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vmaxu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_mu( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vmaxu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmaxu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_mu( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vmaxu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_mu( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vmaxu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmaxu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_mu( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vmaxu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmaxu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_mu( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vmaxu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t 
vl) { - return vmaxu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_mu( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_mu( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmaxu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_mu( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_mu( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmaxu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_mu( @@ -1371,7 +1371,7 @@ vuint16m1_t 
test_vmaxu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_mu( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vmaxu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmaxu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_mu( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vmaxu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_mu( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vmaxu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmaxu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_mu( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vmaxu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m4_mu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vmaxu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_mu( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vmaxu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmaxu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_mu( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vmaxu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmaxu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_mu( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vmaxu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmaxu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_mu( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_mu( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmaxu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_mu( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vmaxu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_mu( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vmaxu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmaxu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_mu( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vmaxu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_mu( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vmaxu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmaxu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_mu( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vmaxu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_mu( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vmaxu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmaxu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_mu( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vmaxu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmaxu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_mu( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vmaxu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmaxu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_mu( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vmaxu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, 
uint64_t op2, size_t vl) { - return vmaxu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_mu( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vmaxu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmaxu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_mu( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vmaxu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_mu( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vmaxu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmaxu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_mu( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vmaxu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_mu( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vmaxu_vx_u64m4_mu(vbool16_t 
mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmaxu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vmaxu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmaxu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmaxu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c index 1f64362d1848..7a31bafb4ac1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_i8mf8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i8mf8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_tu( @@ -22,7 +22,7 @@ vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_i8mf8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i8mf8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_tu( @@ -31,7 +31,7 @@ vint8mf8_t 
test_vmerge_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_i8mf4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i8mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_tu( @@ -40,7 +40,7 @@ vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_i8mf4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i8mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_tu( @@ -49,7 +49,7 @@ vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_i8mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i8mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_tu( @@ -58,7 +58,7 @@ vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_i8mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i8mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_tu( @@ -67,7 +67,7 @@ vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_i8m1_tu(maskedoff, op1, op2, 
mask, vl); + return __riscv_vmerge_vvm_i8m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_tu( @@ -76,7 +76,7 @@ vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_i8m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i8m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_tu( @@ -85,7 +85,7 @@ vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_i8m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i8m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_tu( @@ -94,7 +94,7 @@ vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { - return vmerge_vxm_i8m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i8m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_tu( @@ -103,7 +103,7 @@ vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { - return vmerge_vvm_i8m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i8m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_tu( @@ -112,7 +112,7 @@ vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t 
test_vmerge_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { - return vmerge_vxm_i8m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i8m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_tu( @@ -121,7 +121,7 @@ vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { - return vmerge_vvm_i8m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i8m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_tu( @@ -130,7 +130,7 @@ vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { - return vmerge_vxm_i8m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i8m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_tu( @@ -139,7 +139,7 @@ vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_i16mf4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i16mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_tu( @@ -148,7 +148,7 @@ vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_i16mf4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i16mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: 
@test_vmerge_vvm_i16mf2_tu( @@ -157,7 +157,7 @@ vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_i16mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i16mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_tu( @@ -166,7 +166,7 @@ vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_i16mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i16mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_tu( @@ -175,7 +175,7 @@ vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_i16m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i16m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_tu( @@ -184,7 +184,7 @@ vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_i16m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i16m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_tu( @@ -193,7 +193,7 @@ vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, 
vint16m2_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_i16m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i16m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_tu( @@ -202,7 +202,7 @@ vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_i16m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i16m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_tu( @@ -211,7 +211,7 @@ vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_i16m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i16m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_tu( @@ -220,7 +220,7 @@ vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { - return vmerge_vxm_i16m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i16m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_tu( @@ -229,7 +229,7 @@ vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { - return vmerge_vvm_i16m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i16m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_tu( @@ -238,7 +238,7 @@ vint16m8_t 
test_vmerge_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { - return vmerge_vxm_i16m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i16m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu( @@ -247,7 +247,7 @@ vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_i32mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i32mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu( @@ -256,7 +256,7 @@ vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_i32mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i32mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_tu( @@ -265,7 +265,7 @@ vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_i32m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i32m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_tu( @@ -274,7 +274,7 @@ vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { - return 
vmerge_vxm_i32m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i32m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_tu( @@ -283,7 +283,7 @@ vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_i32m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i32m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_tu( @@ -292,7 +292,7 @@ vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_i32m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i32m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_tu( @@ -301,7 +301,7 @@ vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_i32m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i32m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_tu( @@ -310,7 +310,7 @@ vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_i32m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i32m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_tu( @@ -319,7 +319,7 @@ vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t 
op1, int32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_i32m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i32m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_tu( @@ -328,7 +328,7 @@ vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { - return vmerge_vxm_i32m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i32m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_tu( @@ -337,7 +337,7 @@ vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_i64m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i64m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_tu( @@ -346,7 +346,7 @@ vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_i64m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i64m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_tu( @@ -355,7 +355,7 @@ vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_i64m2_tu(maskedoff, op1, op2, mask, vl); + return 
__riscv_vmerge_vvm_i64m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_tu( @@ -364,7 +364,7 @@ vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_i64m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i64m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_tu( @@ -373,7 +373,7 @@ vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_i64m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i64m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_tu( @@ -382,7 +382,7 @@ vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_i64m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i64m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_tu( @@ -391,7 +391,7 @@ vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_i64m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_i64m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_tu( @@ -400,7 +400,7 @@ vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vmerge_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_i64m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_i64m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_tu( @@ -409,7 +409,7 @@ vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_u8mf8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8mf8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_tu( @@ -418,7 +418,7 @@ vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_u8mf8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8mf8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_tu( @@ -427,7 +427,7 @@ vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_u8mf4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_tu( @@ -436,7 +436,7 @@ vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_u8mf4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8mf4_tu(maskedoff, op1, op2, mask, vl); } // 
CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_tu( @@ -445,7 +445,7 @@ vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_u8mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_tu( @@ -454,7 +454,7 @@ vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_u8mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_tu( @@ -463,7 +463,7 @@ vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_u8m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_tu( @@ -472,7 +472,7 @@ vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_u8m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_tu( @@ -481,7 +481,7 @@ vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t 
op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_u8m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_tu( @@ -490,7 +490,7 @@ vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { - return vmerge_vxm_u8m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_tu( @@ -499,7 +499,7 @@ vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { - return vmerge_vvm_u8m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_tu( @@ -508,7 +508,7 @@ vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { - return vmerge_vxm_u8m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_tu( @@ -517,7 +517,7 @@ vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { - return vmerge_vvm_u8m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u8m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_tu( @@ -526,7 +526,7 @@ vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t 
maskedoff, vuint8m8_t op1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { - return vmerge_vxm_u8m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u8m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_tu( @@ -535,7 +535,7 @@ vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_u16mf4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u16mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_tu( @@ -544,7 +544,7 @@ vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_u16mf4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u16mf4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_tu( @@ -553,7 +553,7 @@ vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_u16mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u16mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_tu( @@ -562,7 +562,7 @@ vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { - return 
vmerge_vxm_u16mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u16mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_tu( @@ -571,7 +571,7 @@ vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_u16m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u16m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_tu( @@ -580,7 +580,7 @@ vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_u16m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u16m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_tu( @@ -589,7 +589,7 @@ vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_u16m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u16m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_tu( @@ -598,7 +598,7 @@ vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_u16m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u16m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_tu( @@ -607,7 +607,7 @@ vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t 
maskedoff, vuint16m2_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_u16m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u16m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_tu( @@ -616,7 +616,7 @@ vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { - return vmerge_vxm_u16m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u16m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_tu( @@ -625,7 +625,7 @@ vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { - return vmerge_vvm_u16m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u16m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_tu( @@ -634,7 +634,7 @@ vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { - return vmerge_vxm_u16m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u16m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu( @@ -643,7 +643,7 @@ vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_u32mf2_tu(maskedoff, op1, op2, 
mask, vl); + return __riscv_vmerge_vvm_u32mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu( @@ -652,7 +652,7 @@ vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_u32mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u32mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_tu( @@ -661,7 +661,7 @@ vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_u32m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u32m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_tu( @@ -670,7 +670,7 @@ vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_u32m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u32m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_tu( @@ -679,7 +679,7 @@ vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_u32m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u32m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_tu( @@ -688,7 +688,7 @@ vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vui // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_u32m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u32m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_tu( @@ -697,7 +697,7 @@ vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_u32m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u32m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_tu( @@ -706,7 +706,7 @@ vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_u32m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u32m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_tu( @@ -715,7 +715,7 @@ vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_u32m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u32m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_tu( @@ -724,7 +724,7 @@ vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { - return vmerge_vxm_u32m8_tu(maskedoff, op1, op2, mask, vl); + return 
__riscv_vmerge_vxm_u32m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_tu( @@ -733,7 +733,7 @@ vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_u64m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u64m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_tu( @@ -742,7 +742,7 @@ vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { - return vmerge_vxm_u64m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u64m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_tu( @@ -751,7 +751,7 @@ vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_u64m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u64m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_tu( @@ -760,7 +760,7 @@ vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { - return vmerge_vxm_u64m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u64m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_tu( @@ -769,7 +769,7 @@ vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_u64m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u64m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_tu( @@ -778,7 +778,7 @@ vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { - return vmerge_vxm_u64m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u64m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_tu( @@ -787,7 +787,7 @@ vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_u64m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_u64m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_tu( @@ -796,7 +796,7 @@ vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { - return vmerge_vxm_u64m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vxm_u64m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_tu( @@ -805,7 +805,7 @@ vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_f16mf4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f16mf4_tu(maskedoff, 
op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_tu( @@ -814,7 +814,7 @@ vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_f16mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f16mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_tu( @@ -823,7 +823,7 @@ vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_f16m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f16m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_tu( @@ -832,7 +832,7 @@ vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_f16m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f16m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_tu( @@ -841,7 +841,7 @@ vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_f16m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f16m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_tu( @@ -850,7 +850,7 @@ vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t 
test_vmerge_vvm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { - return vmerge_vvm_f16m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f16m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu( @@ -859,7 +859,7 @@ vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_f32mf2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f32mf2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_tu( @@ -868,7 +868,7 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_f32m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f32m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_tu( @@ -877,7 +877,7 @@ vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_f32m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f32m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_tu( @@ -886,7 +886,7 @@ vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_f32m4_tu(maskedoff, op1, op2, mask, vl); + return 
__riscv_vmerge_vvm_f32m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_tu( @@ -895,7 +895,7 @@ vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { - return vmerge_vvm_f32m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f32m8_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_tu( @@ -904,7 +904,7 @@ vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { - return vmerge_vvm_f64m1_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f64m1_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_tu( @@ -913,7 +913,7 @@ vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { - return vmerge_vvm_f64m2_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f64m2_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_tu( @@ -922,7 +922,7 @@ vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { - return vmerge_vvm_f64m4_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f64m4_tu(maskedoff, op1, op2, mask, vl); } // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_tu( @@ -931,6 +931,6 @@ vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { - return vmerge_vvm_f64m8_tu(maskedoff, op1, op2, mask, vl); + return __riscv_vmerge_vvm_f64m8_tu(maskedoff, op1, op2, mask, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfeq.c index 003e03d8021a..debb613b7142 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfeq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfeq.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfeq_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmfeq_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmfeq_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfeq_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32_mu( @@ -40,7 +40,7 @@ vbool32_t 
test_vmfeq_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmfeq_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfeq_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmfeq_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmfeq_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfeq_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmfeq_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return 
vmfeq_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmfeq_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfeq_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfeq_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmfeq_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfeq_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmfeq_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfeq_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfeq_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmfeq_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfeq_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmfeq_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t 
maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfeq_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_mu( @@ -130,7 +130,7 @@ vbool64_t test_vmfeq_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfeq_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_mu( @@ -139,7 +139,7 @@ vbool64_t test_vmfeq_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfeq_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_mu( @@ -148,7 +148,7 @@ vbool32_t test_vmfeq_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_mu( @@ -157,7 +157,7 @@ vbool32_t test_vmfeq_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfeq_vv_f32m2_b16_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_mu( @@ -166,7 +166,7 @@ vbool16_t test_vmfeq_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_mu( @@ -175,7 +175,7 @@ vbool16_t test_vmfeq_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfeq_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_mu( @@ -184,7 +184,7 @@ vbool8_t test_vmfeq_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_mu( @@ -193,7 +193,7 @@ vbool8_t test_vmfeq_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfeq_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfeq_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_mu( @@ -202,7 +202,7 @@ vbool4_t test_vmfeq_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8 
// CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfeq_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_mu( @@ -211,7 +211,7 @@ vbool4_t test_vmfeq_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfeq_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_mu( @@ -220,7 +220,7 @@ vbool64_t test_vmfeq_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_mu( @@ -229,7 +229,7 @@ vbool64_t test_vmfeq_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfeq_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_mu( @@ -238,7 +238,7 @@ vbool32_t test_vmfeq_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmfeq_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_mu( @@ -247,7 +247,7 @@ vbool32_t test_vmfeq_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfeq_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_mu( @@ -256,7 +256,7 @@ vbool16_t test_vmfeq_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_mu( @@ -265,7 +265,7 @@ vbool16_t test_vmfeq_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfeq_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_mu( @@ -274,6 +274,6 @@ vbool8_t test_vmfeq_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfeq_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfge.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfge.c index e8a98e167bfb..0186eab64a93 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfge.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfge_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmfge_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmfge_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfge_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmfge_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmfge_vv_f16m1_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmfge_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfge_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmfge_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmfge_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfge_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmfge_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmfge_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfge_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t 
op1, vfloat16m4_t op2, size_t vl) { - return vmfge_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmfge_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfge_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmfge_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfge_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfge_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmfge_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfge_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmfge_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfge_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_mu( @@ -130,7 +130,7 @@ 
vbool64_t test_vmfge_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfge_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m1_b32_mu( @@ -139,7 +139,7 @@ vbool64_t test_vmfge_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfge_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_mu( @@ -148,7 +148,7 @@ vbool32_t test_vmfge_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfge_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m2_b16_mu( @@ -157,7 +157,7 @@ vbool32_t test_vmfge_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfge_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_mu( @@ -166,7 +166,7 @@ vbool16_t test_vmfge_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, 
size_t vl) { - return vmfge_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m4_b8_mu( @@ -175,7 +175,7 @@ vbool16_t test_vmfge_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfge_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_mu( @@ -184,7 +184,7 @@ vbool8_t test_vmfge_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfge_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f32m8_b4_mu( @@ -193,7 +193,7 @@ vbool8_t test_vmfge_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfge_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfge_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_mu( @@ -202,7 +202,7 @@ vbool4_t test_vmfge_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfge_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfge_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m1_b64_mu( @@ -211,7 +211,7 @@ vbool4_t 
test_vmfge_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfge_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_mu( @@ -220,7 +220,7 @@ vbool64_t test_vmfge_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmfge_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m2_b32_mu( @@ -229,7 +229,7 @@ vbool64_t test_vmfge_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfge_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_mu( @@ -238,7 +238,7 @@ vbool32_t test_vmfge_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfge_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m4_b16_mu( @@ -247,7 +247,7 @@ vbool32_t test_vmfge_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t 
vl) { - return vmfge_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_mu( @@ -256,7 +256,7 @@ vbool16_t test_vmfge_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmfge_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vv_f64m8_b8_mu( @@ -265,7 +265,7 @@ vbool16_t test_vmfge_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfge_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_mu( @@ -274,6 +274,6 @@ vbool8_t test_vmfge_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmfge_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfge_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfgt.c index a0e265c805c6..df9c41e0e86e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfgt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t 
op1, vfloat16mf4_t op2, size_t vl) { - return vmfgt_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmfgt_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmfgt_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfgt_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmfgt_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmfgt_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfgt_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmfgt_vf_f16m1_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmfgt_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmfgt_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfgt_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmfgt_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmfgt_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfgt_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmfgt_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, 
_Float16 op2, size_t vl) { - return vmfgt_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmfgt_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfgt_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfgt_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmfgt_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfgt_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfgt_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmfgt_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfgt_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_mu( @@ -130,7 +130,7 @@ vbool64_t test_vmfgt_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfgt_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m1_b32_mu( @@ -139,7 +139,7 
@@ vbool64_t test_vmfgt_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfgt_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_mu( @@ -148,7 +148,7 @@ vbool32_t test_vmfgt_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m2_b16_mu( @@ -157,7 +157,7 @@ vbool32_t test_vmfgt_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfgt_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_mu( @@ -166,7 +166,7 @@ vbool16_t test_vmfgt_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m4_b8_mu( @@ -175,7 +175,7 @@ vbool16_t test_vmfgt_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, 
size_t vl) { - return vmfgt_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_mu( @@ -184,7 +184,7 @@ vbool8_t test_vmfgt_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m8_b4_mu( @@ -193,7 +193,7 @@ vbool8_t test_vmfgt_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfgt_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_mu( @@ -202,7 +202,7 @@ vbool4_t test_vmfgt_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfgt_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m1_b64_mu( @@ -211,7 +211,7 @@ vbool4_t test_vmfgt_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfgt_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_mu( @@ -220,7 +220,7 @@ vbool64_t 
test_vmfgt_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m2_b32_mu( @@ -229,7 +229,7 @@ vbool64_t test_vmfgt_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfgt_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_mu( @@ -238,7 +238,7 @@ vbool32_t test_vmfgt_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m4_b16_mu( @@ -247,7 +247,7 @@ vbool32_t test_vmfgt_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfgt_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_mu( @@ -256,7 +256,7 @@ vbool16_t test_vmfgt_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - 
return vmfgt_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m8_b8_mu( @@ -265,7 +265,7 @@ vbool16_t test_vmfgt_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfgt_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_mu( @@ -274,6 +274,6 @@ vbool8_t test_vmfgt_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmfgt_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfgt_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfle.c index bc7072d091be..f7c11ae39611 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfle.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfle_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmfle_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, 
vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmfle_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfle_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmfle_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmfle_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfle_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmfle_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmfle_vv_f16m2_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmfle_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfle_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmfle_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmfle_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfle_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmfle_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmfle_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfle_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, 
vfloat16m8_t op2, size_t vl) { - return vmfle_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmfle_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfle_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfle_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmfle_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmfle_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_mu( @@ -130,7 +130,7 @@ vbool64_t test_vmfle_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfle_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_mu( @@ -139,7 +139,7 @@ vbool64_t test_vmfle_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfle_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_mu( @@ 
-148,7 +148,7 @@ vbool32_t test_vmfle_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfle_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_mu( @@ -157,7 +157,7 @@ vbool32_t test_vmfle_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfle_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_mu( @@ -166,7 +166,7 @@ vbool16_t test_vmfle_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfle_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_mu( @@ -175,7 +175,7 @@ vbool16_t test_vmfle_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfle_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_mu( @@ -184,7 +184,7 @@ vbool8_t test_vmfle_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, 
size_t vl) { - return vmfle_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_mu( @@ -193,7 +193,7 @@ vbool8_t test_vmfle_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfle_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_mu( @@ -202,7 +202,7 @@ vbool4_t test_vmfle_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmfle_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_mu( @@ -211,7 +211,7 @@ vbool4_t test_vmfle_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfle_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_mu( @@ -220,7 +220,7 @@ vbool64_t test_vmfle_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmfle_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_mu( @@ -229,7 +229,7 @@ vbool64_t 
test_vmfle_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfle_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_mu( @@ -238,7 +238,7 @@ vbool32_t test_vmfle_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfle_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_mu( @@ -247,7 +247,7 @@ vbool32_t test_vmfle_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfle_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_mu( @@ -256,7 +256,7 @@ vbool16_t test_vmfle_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmfle_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_mu( @@ -265,7 +265,7 @@ vbool16_t test_vmfle_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - 
return vmfle_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_mu( @@ -274,6 +274,6 @@ vbool8_t test_vmfle_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmfle_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfle_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmflt.c index 864357473ad1..f6f57f2550e5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmflt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmflt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmflt_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmflt_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmflt_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, 
vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmflt_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmflt_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmflt_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmflt_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmflt_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmflt_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmflt_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmflt_vf_f16m2_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmflt_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmflt_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmflt_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmflt_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmflt_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmflt_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmflt_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmflt_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmflt_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, 
_Float16 op2, size_t vl) { - return vmflt_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmflt_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vmflt_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_mu( @@ -130,7 +130,7 @@ vbool64_t test_vmflt_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmflt_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_mu( @@ -139,7 +139,7 @@ vbool64_t test_vmflt_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmflt_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_mu( @@ -148,7 +148,7 @@ vbool32_t test_vmflt_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmflt_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_mu( @@ 
-157,7 +157,7 @@ vbool32_t test_vmflt_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmflt_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_mu( @@ -166,7 +166,7 @@ vbool16_t test_vmflt_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmflt_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_mu( @@ -175,7 +175,7 @@ vbool16_t test_vmflt_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmflt_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_mu( @@ -184,7 +184,7 @@ vbool8_t test_vmflt_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmflt_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_mu( @@ -193,7 +193,7 @@ vbool8_t test_vmflt_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, 
size_t vl) { - return vmflt_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_mu( @@ -202,7 +202,7 @@ vbool4_t test_vmflt_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vmflt_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_mu( @@ -211,7 +211,7 @@ vbool4_t test_vmflt_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmflt_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_mu( @@ -220,7 +220,7 @@ vbool64_t test_vmflt_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmflt_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_mu( @@ -229,7 +229,7 @@ vbool64_t test_vmflt_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmflt_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_mu( @@ -238,7 +238,7 @@ vbool32_t 
test_vmflt_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmflt_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_mu( @@ -247,7 +247,7 @@ vbool32_t test_vmflt_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmflt_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_mu( @@ -256,7 +256,7 @@ vbool16_t test_vmflt_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmflt_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_mu( @@ -265,7 +265,7 @@ vbool16_t test_vmflt_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmflt_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_mu( @@ -274,6 +274,6 @@ vbool8_t test_vmflt_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return 
vmflt_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmflt_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfne.c index ba03163a97f7..75d5cf442de6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmfne.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfne_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmfne_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmfne_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfne_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmfne_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t 
maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmfne_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfne_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmfne_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmfne_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfne_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmfne_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4_mu( 
@@ -85,7 +85,7 @@ vbool8_t test_vmfne_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfne_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmfne_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmfne_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfne_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfne_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmfne_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmfne_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmfne_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, 
size_t vl) { - return vmfne_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_mu( @@ -130,7 +130,7 @@ vbool64_t test_vmfne_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vmfne_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_mu( @@ -139,7 +139,7 @@ vbool64_t test_vmfne_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vmfne_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_mu( @@ -148,7 +148,7 @@ vbool32_t test_vmfne_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vmfne_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_mu( @@ -157,7 +157,7 @@ vbool32_t test_vmfne_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vmfne_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_mu( @@ -166,7 +166,7 @@ 
vbool16_t test_vmfne_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vmfne_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_mu( @@ -175,7 +175,7 @@ vbool16_t test_vmfne_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vmfne_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_mu( @@ -184,7 +184,7 @@ vbool8_t test_vmfne_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vmfne_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_mu( @@ -193,7 +193,7 @@ vbool8_t test_vmfne_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vmfne_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_mu( @@ -202,7 +202,7 @@ vbool4_t test_vmfne_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return 
vmfne_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_mu( @@ -211,7 +211,7 @@ vbool4_t test_vmfne_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vmfne_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_mu( @@ -220,7 +220,7 @@ vbool64_t test_vmfne_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vmfne_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_mu( @@ -229,7 +229,7 @@ vbool64_t test_vmfne_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vmfne_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_mu( @@ -238,7 +238,7 @@ vbool32_t test_vmfne_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vmfne_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_mu( @@ -247,7 +247,7 @@ vbool32_t 
test_vmfne_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vmfne_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_mu( @@ -256,7 +256,7 @@ vbool16_t test_vmfne_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vmfne_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_mu( @@ -265,7 +265,7 @@ vbool16_t test_vmfne_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vmfne_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_mu( @@ -274,6 +274,6 @@ vbool8_t test_vmfne_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vmfne_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmfne_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmin.c index 8beaf1dc93b0..6bd7ca532697 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmin.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmin.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmin_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vmin_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vmin_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmin_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vmin_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vmin_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmin_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t 
test_vmin_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vmin_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmin_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vmin_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vmin_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmin_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vmin_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vmin_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmin_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vmin_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vmin_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmin_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vmin_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vmin_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmin_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vmin_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vmin_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vmin_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmin_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vmin_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vmin_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmin_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vmin_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vmin_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vmin_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmin_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vmin_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vmin_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmin_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vmin_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vmin_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmin_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vmin_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vmin_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vmin_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmin_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vmin_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vmin_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmin_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vmin_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vmin_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vmin_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmin_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vmin_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vmin_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmin_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vmin_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vmin_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmin_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vmin_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vmin_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vmin_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmin_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vmin_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vmin_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmin_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vmin_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vmin_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vmin_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmin_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vmin_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vmin_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmin_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vmin_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_tum( @@ -408,7 +408,7 @@ vint64m8_t test_vmin_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmin_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vmin_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf8_t test_vmin_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_tum( @@ -426,7 +426,7 @@ vint8mf8_t test_vmin_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmin_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_tum( @@ -435,7 +435,7 @@ vint8mf4_t test_vmin_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_tum( @@ -444,7 +444,7 @@ vint8mf4_t test_vmin_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmin_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_tum( @@ -453,7 +453,7 @@ vint8mf2_t test_vmin_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m1_tum( @@ -462,7 
+462,7 @@ vint8mf2_t test_vmin_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmin_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vmin_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m2_tum( @@ -480,7 +480,7 @@ vint8m1_t test_vmin_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmin_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m2_tum( @@ -489,7 +489,7 @@ vint8m2_t test_vmin_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m4_tum( @@ -498,7 +498,7 @@ vint8m2_t test_vmin_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmin_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmin_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m4_tum( @@ -507,7 +507,7 @@ vint8m4_t test_vmin_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m8_tum( @@ -516,7 +516,7 @@ vint8m4_t test_vmin_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmin_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m8_tum( @@ -525,7 +525,7 @@ vint8m8_t test_vmin_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_tum( @@ -534,7 +534,7 @@ vint8m8_t test_vmin_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmin_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_tum( @@ -543,7 +543,7 @@ vint16mf4_t test_vmin_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t 
maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_tum( @@ -552,7 +552,7 @@ vint16mf4_t test_vmin_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmin_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_tum( @@ -561,7 +561,7 @@ vint16mf2_t test_vmin_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m1_tum( @@ -570,7 +570,7 @@ vint16mf2_t test_vmin_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmin_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m1_tum( @@ -579,7 +579,7 @@ vint16m1_t test_vmin_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m2_tum( @@ -588,7 +588,7 @@ vint16m1_t 
test_vmin_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmin_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m2_tum( @@ -597,7 +597,7 @@ vint16m2_t test_vmin_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m4_tum( @@ -606,7 +606,7 @@ vint16m2_t test_vmin_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmin_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m4_tum( @@ -615,7 +615,7 @@ vint16m4_t test_vmin_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m8_tum( @@ -624,7 +624,7 @@ vint16m4_t test_vmin_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmin_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmin_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m8_tum( @@ -633,7 +633,7 @@ vint16m8_t test_vmin_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tum( @@ -642,7 +642,7 @@ vint16m8_t test_vmin_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmin_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tum( @@ -651,7 +651,7 @@ vint32mf2_t test_vmin_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m1_tum( @@ -660,7 +660,7 @@ vint32mf2_t test_vmin_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmin_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m1_tum( @@ -669,7 +669,7 @@ vint32m1_t test_vmin_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vmin_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m2_tum( @@ -678,7 +678,7 @@ vint32m1_t test_vmin_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmin_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m2_tum( @@ -687,7 +687,7 @@ vint32m2_t test_vmin_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m4_tum( @@ -696,7 +696,7 @@ vint32m2_t test_vmin_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmin_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m4_tum( @@ -705,7 +705,7 @@ vint32m4_t test_vmin_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m8_tum( @@ -714,7 +714,7 
@@ vint32m4_t test_vmin_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmin_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m8_tum( @@ -723,7 +723,7 @@ vint32m8_t test_vmin_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m1_tum( @@ -732,7 +732,7 @@ vint32m8_t test_vmin_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmin_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m1_tum( @@ -741,7 +741,7 @@ vint64m1_t test_vmin_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m2_tum( @@ -750,7 +750,7 @@ vint64m1_t test_vmin_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmin_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); 
+ return __riscv_vmin_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m2_tum( @@ -759,7 +759,7 @@ vint64m2_t test_vmin_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m4_tum( @@ -768,7 +768,7 @@ vint64m2_t test_vmin_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmin_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m4_tum( @@ -777,7 +777,7 @@ vint64m4_t test_vmin_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m8_tum( @@ -786,7 +786,7 @@ vint64m4_t test_vmin_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmin_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m8_tum( @@ -795,7 +795,7 @@ vint64m8_t test_vmin_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vx_i64m8_tum(vbool8_t 
mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_tumu( @@ -804,7 +804,7 @@ vint64m8_t test_vmin_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmin_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vmin_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_tumu( @@ -822,7 +822,7 @@ vint8mf8_t test_vmin_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmin_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_tumu( @@ -831,7 +831,7 @@ vint8mf4_t test_vmin_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_tumu( @@ -840,7 +840,7 @@ vint8mf4_t 
test_vmin_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmin_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_tumu( @@ -849,7 +849,7 @@ vint8mf2_t test_vmin_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m1_tumu( @@ -858,7 +858,7 @@ vint8mf2_t test_vmin_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmin_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m1_tumu( @@ -867,7 +867,7 @@ vint8m1_t test_vmin_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m2_tumu( @@ -876,7 +876,7 @@ vint8m1_t test_vmin_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmin_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmin_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m2_tumu( @@ -885,7 +885,7 @@ vint8m2_t test_vmin_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m4_tumu( @@ -894,7 +894,7 @@ vint8m2_t test_vmin_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmin_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m4_tumu( @@ -903,7 +903,7 @@ vint8m4_t test_vmin_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m8_tumu( @@ -912,7 +912,7 @@ vint8m4_t test_vmin_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmin_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m8_tumu( @@ -921,7 +921,7 @@ vint8m8_t test_vmin_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, 
vint8m8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_tumu( @@ -930,7 +930,7 @@ vint8m8_t test_vmin_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmin_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_tumu( @@ -939,7 +939,7 @@ vint16mf4_t test_vmin_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_tumu( @@ -948,7 +948,7 @@ vint16mf4_t test_vmin_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmin_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_tumu( @@ -957,7 +957,7 @@ vint16mf2_t test_vmin_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m1_tumu( @@ -966,7 +966,7 @@ vint16mf2_t 
test_vmin_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmin_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m1_tumu( @@ -975,7 +975,7 @@ vint16m1_t test_vmin_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m2_tumu( @@ -984,7 +984,7 @@ vint16m1_t test_vmin_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmin_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m2_tumu( @@ -993,7 +993,7 @@ vint16m2_t test_vmin_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m4_tumu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vmin_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmin_vv_i16m4_tumu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vmin_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m4_tumu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vmin_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m8_tumu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vmin_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmin_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m8_tumu( @@ -1029,7 +1029,7 @@ vint16m8_t test_vmin_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_tumu( @@ -1038,7 +1038,7 @@ vint16m8_t test_vmin_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmin_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_tumu( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vmin_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32mf2_t test_vmin_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m1_tumu( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vmin_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmin_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m1_tumu( @@ -1065,7 +1065,7 @@ vint32m1_t test_vmin_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m2_tumu( @@ -1074,7 +1074,7 @@ vint32m1_t test_vmin_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmin_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m2_tumu( @@ -1083,7 +1083,7 @@ vint32m2_t test_vmin_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmin_vv_i32m4_tumu( @@ -1092,7 +1092,7 @@ vint32m2_t test_vmin_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmin_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m4_tumu( @@ -1101,7 +1101,7 @@ vint32m4_t test_vmin_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m8_tumu( @@ -1110,7 +1110,7 @@ vint32m4_t test_vmin_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmin_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m8_tumu( @@ -1119,7 +1119,7 @@ vint32m8_t test_vmin_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m1_tumu( @@ -1128,7 +1128,7 @@ vint32m8_t test_vmin_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, 
vint64m1_t op2, size_t vl) { - return vmin_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m1_tumu( @@ -1137,7 +1137,7 @@ vint64m1_t test_vmin_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m2_tumu( @@ -1146,7 +1146,7 @@ vint64m1_t test_vmin_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmin_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m2_tumu( @@ -1155,7 +1155,7 @@ vint64m2_t test_vmin_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m4_tumu( @@ -1164,7 +1164,7 @@ vint64m2_t test_vmin_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmin_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m4_tumu( @@ -1173,7 +1173,7 @@ vint64m4_t 
test_vmin_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m8_tumu( @@ -1182,7 +1182,7 @@ vint64m4_t test_vmin_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmin_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m8_tumu( @@ -1191,7 +1191,7 @@ vint64m8_t test_vmin_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_mu( @@ -1200,7 +1200,7 @@ vint64m8_t test_vmin_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmin_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vmin_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vmin_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_mu( @@ -1218,7 +1218,7 @@ vint8mf8_t test_vmin_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmin_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_mu( @@ -1227,7 +1227,7 @@ vint8mf4_t test_vmin_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_mu( @@ -1236,7 +1236,7 @@ vint8mf4_t test_vmin_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmin_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_mu( @@ -1245,7 +1245,7 @@ vint8mf2_t test_vmin_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m1_mu( @@ -1254,7 +1254,7 @@ vint8mf2_t test_vmin_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1_mu(vbool8_t mask, 
vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmin_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m1_mu( @@ -1263,7 +1263,7 @@ vint8m1_t test_vmin_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m2_mu( @@ -1272,7 +1272,7 @@ vint8m1_t test_vmin_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmin_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m2_mu( @@ -1281,7 +1281,7 @@ vint8m2_t test_vmin_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m4_mu( @@ -1290,7 +1290,7 @@ vint8m2_t test_vmin_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmin_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m4_mu( @@ -1299,7 +1299,7 @@ vint8m4_t test_vmin_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8m8_mu( @@ -1308,7 +1308,7 @@ vint8m4_t test_vmin_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmin_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8m8_mu( @@ -1317,7 +1317,7 @@ vint8m8_t test_vmin_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmin_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_mu( @@ -1326,7 +1326,7 @@ vint8m8_t test_vmin_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmin_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_mu( @@ -1335,7 +1335,7 @@ vint16mf4_t test_vmin_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmin_vv_i16mf2_mu( @@ -1344,7 +1344,7 @@ vint16mf4_t test_vmin_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmin_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_mu( @@ -1353,7 +1353,7 @@ vint16mf2_t test_vmin_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m1_mu( @@ -1362,7 +1362,7 @@ vint16mf2_t test_vmin_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmin_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m1_mu( @@ -1371,7 +1371,7 @@ vint16m1_t test_vmin_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m2_mu( @@ -1380,7 +1380,7 @@ vint16m1_t test_vmin_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return 
vmin_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m2_mu( @@ -1389,7 +1389,7 @@ vint16m2_t test_vmin_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m4_mu( @@ -1398,7 +1398,7 @@ vint16m2_t test_vmin_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmin_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m4_mu( @@ -1407,7 +1407,7 @@ vint16m4_t test_vmin_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i16m8_mu( @@ -1416,7 +1416,7 @@ vint16m4_t test_vmin_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmin_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i16m8_mu( @@ -1425,7 +1425,7 @@ vint16m8_t test_vmin_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m8_t test_vmin_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmin_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_mu( @@ -1434,7 +1434,7 @@ vint16m8_t test_vmin_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmin_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_mu( @@ -1443,7 +1443,7 @@ vint32mf2_t test_vmin_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m1_mu( @@ -1452,7 +1452,7 @@ vint32mf2_t test_vmin_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmin_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m1_mu( @@ -1461,7 +1461,7 @@ vint32m1_t test_vmin_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m2_mu( @@ 
-1470,7 +1470,7 @@ vint32m1_t test_vmin_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmin_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m2_mu( @@ -1479,7 +1479,7 @@ vint32m2_t test_vmin_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m4_mu( @@ -1488,7 +1488,7 @@ vint32m2_t test_vmin_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmin_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m4_mu( @@ -1497,7 +1497,7 @@ vint32m4_t test_vmin_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i32m8_mu( @@ -1506,7 +1506,7 @@ vint32m4_t test_vmin_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmin_vv_i32m8_mu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vmin_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i32m8_mu( @@ -1515,7 +1515,7 @@ vint32m8_t test_vmin_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmin_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m1_mu( @@ -1524,7 +1524,7 @@ vint32m8_t test_vmin_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmin_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m1_mu( @@ -1533,7 +1533,7 @@ vint64m1_t test_vmin_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m2_mu( @@ -1542,7 +1542,7 @@ vint64m1_t test_vmin_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmin_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m2_mu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vmin_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vx_i64m2_mu(vbool32_t 
mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m4_mu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vmin_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmin_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m4_mu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vmin_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i64m8_mu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vmin_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmin_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i64m8_mu( @@ -1587,6 +1587,6 @@ vint64m8_t test_vmin_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmin_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmin_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vminu.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vminu.c index 06d342d1f073..1ccc1420cb2b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vminu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vminu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vminu_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vminu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_tu( @@ -30,7 +30,7 @@ vuint8mf8_t test_vminu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vminu_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_tu( @@ -39,7 +39,7 @@ vuint8mf4_t test_vminu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_tu( @@ -48,7 +48,7 @@ vuint8mf4_t test_vminu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vminu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vminu_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_tu( @@ -57,7 +57,7 @@ vuint8mf2_t test_vminu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m1_tu( @@ -66,7 +66,7 @@ vuint8mf2_t test_vminu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vminu_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vminu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m2_tu( @@ -84,7 +84,7 @@ vuint8m1_t test_vminu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vminu_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m2_tu( @@ -93,7 +93,7 @@ vuint8m2_t test_vminu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vminu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m4_tu( @@ -102,7 +102,7 @@ vuint8m2_t test_vminu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vminu_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m4_tu( @@ -111,7 +111,7 @@ vuint8m4_t test_vminu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m8_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vminu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vminu_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m8_tu( @@ -129,7 +129,7 @@ vuint8m8_t test_vminu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_tu( @@ -138,7 +138,7 @@ vuint8m8_t test_vminu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vminu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vminu_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_tu( @@ -147,7 +147,7 @@ vuint16mf4_t test_vminu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_tu( @@ -156,7 +156,7 @@ vuint16mf4_t test_vminu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vminu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_tu( @@ -165,7 +165,7 @@ vuint16mf2_t test_vminu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m1_tu( @@ -174,7 +174,7 @@ vuint16mf2_t test_vminu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vminu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m1_tu( @@ -183,7 +183,7 @@ vuint16m1_t test_vminu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m2_tu( @@ -192,7 +192,7 @@ vuint16m1_t test_vminu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vminu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m2_tu( @@ -201,7 +201,7 @@ vuint16m2_t test_vminu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m4_tu( @@ -210,7 +210,7 @@ vuint16m2_t test_vminu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vminu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m4_tu( @@ -219,7 +219,7 @@ vuint16m4_t test_vminu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m8_tu( @@ -228,7 +228,7 @@ vuint16m4_t test_vminu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t 
op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vminu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m8_tu( @@ -237,7 +237,7 @@ vuint16m8_t test_vminu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tu( @@ -246,7 +246,7 @@ vuint16m8_t test_vminu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vminu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tu( @@ -255,7 +255,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m1_tu( @@ -264,7 +264,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vminu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m1_tu( @@ -273,7 +273,7 @@ vuint32m1_t 
test_vminu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m2_tu( @@ -282,7 +282,7 @@ vuint32m1_t test_vminu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vminu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m2_tu( @@ -291,7 +291,7 @@ vuint32m2_t test_vminu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m4_tu( @@ -300,7 +300,7 @@ vuint32m2_t test_vminu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vminu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m4_tu( @@ -309,7 +309,7 @@ vuint32m4_t test_vminu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m8_tu( @@ -318,7 +318,7 @@ 
vuint32m4_t test_vminu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vminu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m8_tu( @@ -327,7 +327,7 @@ vuint32m8_t test_vminu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m1_tu( @@ -336,7 +336,7 @@ vuint32m8_t test_vminu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vminu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m1_tu( @@ -345,7 +345,7 @@ vuint64m1_t test_vminu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m2_tu( @@ -354,7 +354,7 @@ vuint64m1_t test_vminu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vminu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m2_tu( @@ 
-363,7 +363,7 @@ vuint64m2_t test_vminu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m4_tu( @@ -372,7 +372,7 @@ vuint64m2_t test_vminu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vminu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m4_tu( @@ -381,7 +381,7 @@ vuint64m4_t test_vminu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m8_tu( @@ -390,7 +390,7 @@ vuint64m4_t test_vminu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vminu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m8_t test_vminu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vminu_vv_u8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t test_vminu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vminu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_tum( @@ -417,7 +417,7 @@ vuint8mf8_t test_vminu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_tum( @@ -426,7 +426,7 @@ vuint8mf8_t test_vminu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vminu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_tum( @@ -435,7 +435,7 @@ vuint8mf4_t test_vminu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_tum( @@ -444,7 +444,7 @@ vuint8mf4_t test_vminu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, 
vuint8mf2_t op2, size_t vl) { - return vminu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_tum( @@ -453,7 +453,7 @@ vuint8mf2_t test_vminu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m1_tum( @@ -462,7 +462,7 @@ vuint8mf2_t test_vminu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vminu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m1_tum( @@ -471,7 +471,7 @@ vuint8m1_t test_vminu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m2_tum( @@ -480,7 +480,7 @@ vuint8m1_t test_vminu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vminu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m2_tum( @@ -489,7 +489,7 @@ vuint8m2_t test_vminu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t 
maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m4_tum( @@ -498,7 +498,7 @@ vuint8m2_t test_vminu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vminu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m4_tum( @@ -507,7 +507,7 @@ vuint8m4_t test_vminu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m8_tum( @@ -516,7 +516,7 @@ vuint8m4_t test_vminu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vminu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m8_tum( @@ -525,7 +525,7 @@ vuint8m8_t test_vminu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_tum( @@ -534,7 +534,7 @@ vuint8m8_t test_vminu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vminu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_tum( @@ -543,7 +543,7 @@ vuint16mf4_t test_vminu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_tum( @@ -552,7 +552,7 @@ vuint16mf4_t test_vminu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vminu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_tum( @@ -561,7 +561,7 @@ vuint16mf2_t test_vminu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m1_tum( @@ -570,7 +570,7 @@ vuint16mf2_t test_vminu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1_tum(vbool16_t 
mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vminu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m1_tum( @@ -579,7 +579,7 @@ vuint16m1_t test_vminu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m2_tum( @@ -588,7 +588,7 @@ vuint16m1_t test_vminu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vminu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m2_tum( @@ -597,7 +597,7 @@ vuint16m2_t test_vminu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m4_tum( @@ -606,7 +606,7 @@ vuint16m2_t test_vminu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vminu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m4_tum( @@ -615,7 
+615,7 @@ vuint16m4_t test_vminu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m8_tum( @@ -624,7 +624,7 @@ vuint16m4_t test_vminu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vminu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m8_tum( @@ -633,7 +633,7 @@ vuint16m8_t test_vminu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tum( @@ -642,7 +642,7 @@ vuint16m8_t test_vminu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vminu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tum( @@ -651,7 +651,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - 
return vminu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m1_tum( @@ -660,7 +660,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vminu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m1_tum( @@ -669,7 +669,7 @@ vuint32m1_t test_vminu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m2_tum( @@ -678,7 +678,7 @@ vuint32m1_t test_vminu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vminu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m2_tum( @@ -687,7 +687,7 @@ vuint32m2_t test_vminu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m4_tum( @@ -696,7 +696,7 @@ vuint32m2_t test_vminu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t 
maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vminu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m4_tum( @@ -705,7 +705,7 @@ vuint32m4_t test_vminu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m8_tum( @@ -714,7 +714,7 @@ vuint32m4_t test_vminu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vminu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m8_tum( @@ -723,7 +723,7 @@ vuint32m8_t test_vminu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m1_tum( @@ -732,7 +732,7 @@ vuint32m8_t test_vminu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vminu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vminu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m1_tum( @@ -741,7 +741,7 @@ vuint64m1_t test_vminu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m2_tum( @@ -750,7 +750,7 @@ vuint64m1_t test_vminu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vminu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m2_tum( @@ -759,7 +759,7 @@ vuint64m2_t test_vminu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m4_tum( @@ -768,7 +768,7 @@ vuint64m2_t test_vminu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vminu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m4_tum( @@ -777,7 +777,7 @@ vuint64m4_t test_vminu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vminu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m8_tum( @@ -786,7 +786,7 @@ vuint64m4_t test_vminu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vminu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m8_t test_vminu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vminu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vminu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_tumu( @@ -813,7 +813,7 @@ vuint8mf8_t test_vminu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_tumu( @@ -822,7 +822,7 @@ vuint8mf8_t test_vminu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vminu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_tumu( @@ -831,7 +831,7 @@ vuint8mf4_t test_vminu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_tumu( @@ -840,7 +840,7 @@ vuint8mf4_t test_vminu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vminu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_tumu( @@ -849,7 +849,7 @@ vuint8mf2_t test_vminu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m1_tumu( @@ -858,7 +858,7 @@ vuint8mf2_t test_vminu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t 
maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vminu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m1_tumu( @@ -867,7 +867,7 @@ vuint8m1_t test_vminu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m2_tumu( @@ -876,7 +876,7 @@ vuint8m1_t test_vminu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vminu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m2_tumu( @@ -885,7 +885,7 @@ vuint8m2_t test_vminu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m4_tumu( @@ -894,7 +894,7 @@ vuint8m2_t test_vminu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vminu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m4_tumu( @@ -903,7 +903,7 @@ vuint8m4_t 
test_vminu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m8_tumu( @@ -912,7 +912,7 @@ vuint8m4_t test_vminu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vminu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m8_tumu( @@ -921,7 +921,7 @@ vuint8m8_t test_vminu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_tumu( @@ -930,7 +930,7 @@ vuint8m8_t test_vminu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vminu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_tumu( @@ -939,7 +939,7 @@ vuint16mf4_t test_vminu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return 
vminu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_tumu( @@ -948,7 +948,7 @@ vuint16mf4_t test_vminu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vminu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_tumu( @@ -957,7 +957,7 @@ vuint16mf2_t test_vminu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m1_tumu( @@ -966,7 +966,7 @@ vuint16mf2_t test_vminu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vminu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m1_tumu( @@ -975,7 +975,7 @@ vuint16m1_t test_vminu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m2_tumu( @@ -984,7 +984,7 @@ vuint16m1_t 
test_vminu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vminu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m2_tumu( @@ -993,7 +993,7 @@ vuint16m2_t test_vminu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m4_tumu( @@ -1002,7 +1002,7 @@ vuint16m2_t test_vminu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vminu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m4_tumu( @@ -1011,7 +1011,7 @@ vuint16m4_t test_vminu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m8_tumu( @@ -1020,7 +1020,7 @@ vuint16m4_t test_vminu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return 
vminu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m8_tumu( @@ -1029,7 +1029,7 @@ vuint16m8_t test_vminu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_tumu( @@ -1038,7 +1038,7 @@ vuint16m8_t test_vminu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vminu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_tumu( @@ -1047,7 +1047,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m1_tumu( @@ -1056,7 +1056,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vminu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m1_tumu( @@ -1065,7 +1065,7 @@ vuint32m1_t 
test_vminu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m2_tumu( @@ -1074,7 +1074,7 @@ vuint32m1_t test_vminu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vminu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m2_tumu( @@ -1083,7 +1083,7 @@ vuint32m2_t test_vminu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m4_tumu( @@ -1092,7 +1092,7 @@ vuint32m2_t test_vminu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vminu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m4_tumu( @@ -1101,7 +1101,7 @@ vuint32m4_t test_vminu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return 
vminu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m8_tumu( @@ -1110,7 +1110,7 @@ vuint32m4_t test_vminu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vminu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m8_tumu( @@ -1119,7 +1119,7 @@ vuint32m8_t test_vminu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m1_tumu( @@ -1128,7 +1128,7 @@ vuint32m8_t test_vminu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vminu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m1_tumu( @@ -1137,7 +1137,7 @@ vuint64m1_t test_vminu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m2_tumu( @@ -1146,7 +1146,7 @@ vuint64m1_t test_vminu_vx_u64m1_tumu(vbool64_t 
mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vminu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m2_tumu( @@ -1155,7 +1155,7 @@ vuint64m2_t test_vminu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m4_tumu( @@ -1164,7 +1164,7 @@ vuint64m2_t test_vminu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vminu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m4_tumu( @@ -1173,7 +1173,7 @@ vuint64m4_t test_vminu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m8_tumu( @@ -1182,7 +1182,7 @@ vuint64m4_t test_vminu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vminu_vv_u64m8_tumu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vminu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m8_t test_vminu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t test_vminu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vminu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_mu( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vminu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_mu( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vminu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vminu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_mu( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vminu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_mu( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vminu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vminu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_mu( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vminu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m1_mu( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vminu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vminu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m1_mu( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vminu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vminu_vv_u8m2_mu( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vminu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vminu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m2_mu( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vminu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m4_mu( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vminu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vminu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m4_mu( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vminu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u8m8_mu( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vminu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return 
vminu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u8m8_mu( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vminu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vminu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_mu( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vminu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vminu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_mu( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vminu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_mu( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vminu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vminu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_mu( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vminu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t 
maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m1_mu( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vminu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vminu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m1_mu( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vminu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m2_mu( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vminu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vminu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m2_mu( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vminu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vminu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m4_mu( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vminu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vminu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m4_mu( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vminu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u16m8_mu( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vminu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vminu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u16m8_mu( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vminu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vminu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_mu( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vminu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vminu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vminu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_mu( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m1_mu( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vminu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m1_mu( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vminu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m2_mu( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vminu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vminu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vminu_vx_u32m2_mu( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vminu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m4_mu( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vminu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vminu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m4_mu( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vminu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vminu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u32m8_mu( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vminu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vminu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u32m8_mu( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vminu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, 
uint32_t op2, size_t vl) { - return vminu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m1_mu( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vminu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vminu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m1_mu( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vminu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m2_mu( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vminu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vminu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m2_mu( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vminu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m4_mu( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vminu_vx_u64m2_mu(vbool32_t 
mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vminu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m4_mu( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vminu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m8_mu( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vminu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vminu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vminu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vminu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vminu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsbf.c index 7b271a232b9c..a187d97a9c2b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsbf.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsbf.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbf_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { - return vmsbf_m_b1_mu(mask, maskedoff, op1, vl); + return __riscv_vmsbf_m_b1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b2_mu( @@ -21,7 +21,7 @@ vbool1_t test_vmsbf_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbf_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { - return vmsbf_m_b2_mu(mask, maskedoff, op1, vl); + return __riscv_vmsbf_m_b2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b4_mu( @@ -30,7 +30,7 @@ vbool2_t test_vmsbf_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbf_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { - return vmsbf_m_b4_mu(mask, maskedoff, op1, vl); + return __riscv_vmsbf_m_b4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b8_mu( @@ -39,7 +39,7 @@ vbool4_t test_vmsbf_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbf_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { - return vmsbf_m_b8_mu(mask, maskedoff, op1, vl); + return __riscv_vmsbf_m_b8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b16_mu( @@ -48,7 +48,7 @@ vbool8_t test_vmsbf_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbf_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { - return vmsbf_m_b16_mu(mask, maskedoff, op1, vl); + return __riscv_vmsbf_m_b16_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b32_mu( @@ -57,7 +57,7 @@ vbool16_t test_vmsbf_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1 
// CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbf_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { - return vmsbf_m_b32_mu(mask, maskedoff, op1, vl); + return __riscv_vmsbf_m_b32_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsbf_m_b64_mu( @@ -66,6 +66,6 @@ vbool32_t test_vmsbf_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbf_m_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { - return vmsbf_m_b64_mu(mask, maskedoff, op1, vl); + return __riscv_vmsbf_m_b64_mu(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmseq.c index b6c9efbbc19e..05273d8931c8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmseq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmseq.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmseq_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmseq_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmseq_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t 
test_vmseq_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmseq_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmseq_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmseq_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmseq_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmseq_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmseq_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmseq_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmseq_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmseq_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmseq_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmseq_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmseq_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmseq_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmseq_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - 
return vmseq_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmseq_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmseq_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1_mu( @@ -130,7 +130,7 @@ vbool1_t test_vmseq_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmseq_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64_mu( @@ -139,7 +139,7 @@ vbool1_t test_vmseq_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmseq_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64_mu( @@ -148,7 +148,7 @@ vbool64_t test_vmseq_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32_mu( @@ -157,7 +157,7 @@ vbool64_t test_vmseq_vx_i16mf4_b64_mu(vbool64_t 
mask, vbool64_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmseq_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32_mu( @@ -166,7 +166,7 @@ vbool32_t test_vmseq_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16_mu( @@ -175,7 +175,7 @@ vbool32_t test_vmseq_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmseq_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16_mu( @@ -184,7 +184,7 @@ vbool16_t test_vmseq_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8_mu( @@ -193,7 +193,7 @@ vbool16_t test_vmseq_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmseq_vv_i16m2_b8_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8_mu( @@ -202,7 +202,7 @@ vbool8_t test_vmseq_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4_mu( @@ -211,7 +211,7 @@ vbool8_t test_vmseq_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmseq_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4_mu( @@ -220,7 +220,7 @@ vbool4_t test_vmseq_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2_mu( @@ -229,7 +229,7 @@ vbool4_t test_vmseq_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmseq_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2_mu( @@ -238,7 +238,7 @@ vbool2_t test_vmseq_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmseq_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_mu( @@ -247,7 +247,7 @@ vbool2_t test_vmseq_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmseq_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_mu( @@ -256,7 +256,7 @@ vbool64_t test_vmseq_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32_mu( @@ -265,7 +265,7 @@ vbool64_t test_vmseq_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmseq_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32_mu( @@ -274,7 +274,7 @@ vbool32_t test_vmseq_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmseq_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16_mu( @@ -283,7 +283,7 @@ vbool32_t test_vmseq_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmseq_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16_mu( @@ -292,7 +292,7 @@ vbool16_t test_vmseq_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8_mu( @@ -301,7 +301,7 @@ vbool16_t test_vmseq_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmseq_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8_mu( @@ -310,7 +310,7 @@ vbool8_t test_vmseq_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4_mu( @@ -319,7 +319,7 @@ vbool8_t test_vmseq_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool4_t test_vmseq_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmseq_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4_mu( @@ -328,7 +328,7 @@ vbool4_t test_vmseq_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmseq_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64_mu( @@ -337,7 +337,7 @@ vbool4_t test_vmseq_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmseq_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64_mu( @@ -346,7 +346,7 @@ vbool64_t test_vmseq_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32_mu( @@ -355,7 +355,7 @@ vbool64_t test_vmseq_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmseq_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32_mu( @@ -364,7 +364,7 @@ vbool32_t test_vmseq_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16_mu( @@ -373,7 +373,7 @@ vbool32_t test_vmseq_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmseq_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16_mu( @@ -382,7 +382,7 @@ vbool16_t test_vmseq_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8_mu( @@ -391,7 +391,7 @@ vbool16_t test_vmseq_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmseq_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8_mu( @@ -400,7 +400,7 @@ vbool8_t test_vmseq_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i64m8_b8_mu(vbool8_t mask, 
vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmseq_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64_mu( @@ -409,7 +409,7 @@ vbool8_t test_vmseq_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmseq_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64_mu( @@ -418,7 +418,7 @@ vbool64_t test_vmseq_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32_mu( @@ -427,7 +427,7 @@ vbool64_t test_vmseq_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmseq_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32_mu( @@ -436,7 +436,7 @@ vbool32_t test_vmseq_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmseq_vv_u8mf2_b16_mu( @@ -445,7 +445,7 @@ vbool32_t test_vmseq_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmseq_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16_mu( @@ -454,7 +454,7 @@ vbool16_t test_vmseq_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8_mu( @@ -463,7 +463,7 @@ vbool16_t test_vmseq_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmseq_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8_mu( @@ -472,7 +472,7 @@ vbool8_t test_vmseq_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4_mu( @@ -481,7 +481,7 @@ vbool8_t test_vmseq_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, 
vuint8m2_t op2, size_t vl) { - return vmseq_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4_mu( @@ -490,7 +490,7 @@ vbool4_t test_vmseq_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2_mu( @@ -499,7 +499,7 @@ vbool4_t test_vmseq_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmseq_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2_mu( @@ -508,7 +508,7 @@ vbool2_t test_vmseq_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1_mu( @@ -517,7 +517,7 @@ vbool2_t test_vmseq_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmseq_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1_mu( @@ -526,7 +526,7 @@ vbool1_t test_vmseq_vv_u8m8_b1_mu(vbool1_t mask, 
vbool1_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmseq_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64_mu( @@ -535,7 +535,7 @@ vbool1_t test_vmseq_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmseq_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64_mu( @@ -544,7 +544,7 @@ vbool64_t test_vmseq_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32_mu( @@ -553,7 +553,7 @@ vbool64_t test_vmseq_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmseq_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32_mu( @@ -562,7 +562,7 @@ vbool32_t test_vmseq_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return 
vmseq_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16_mu( @@ -571,7 +571,7 @@ vbool32_t test_vmseq_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmseq_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16_mu( @@ -580,7 +580,7 @@ vbool16_t test_vmseq_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8_mu( @@ -589,7 +589,7 @@ vbool16_t test_vmseq_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmseq_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8_mu( @@ -598,7 +598,7 @@ vbool8_t test_vmseq_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4_mu( @@ -607,7 +607,7 @@ vbool8_t test_vmseq_vx_u16m2_b8_mu(vbool8_t 
mask, vbool8_t maskedoff, vuint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmseq_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4_mu( @@ -616,7 +616,7 @@ vbool4_t test_vmseq_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2_mu( @@ -625,7 +625,7 @@ vbool4_t test_vmseq_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmseq_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2_mu( @@ -634,7 +634,7 @@ vbool2_t test_vmseq_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmseq_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_mu( @@ -643,7 +643,7 @@ vbool2_t test_vmseq_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmseq_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vmseq_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_mu( @@ -652,7 +652,7 @@ vbool64_t test_vmseq_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32_mu( @@ -661,7 +661,7 @@ vbool64_t test_vmseq_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmseq_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32_mu( @@ -670,7 +670,7 @@ vbool32_t test_vmseq_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16_mu( @@ -679,7 +679,7 @@ vbool32_t test_vmseq_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmseq_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16_mu( @@ -688,7 +688,7 @@ vbool16_t test_vmseq_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, 
vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8_mu( @@ -697,7 +697,7 @@ vbool16_t test_vmseq_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmseq_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8_mu( @@ -706,7 +706,7 @@ vbool8_t test_vmseq_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4_mu( @@ -715,7 +715,7 @@ vbool8_t test_vmseq_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmseq_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4_mu( @@ -724,7 +724,7 @@ vbool4_t test_vmseq_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmseq_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmseq_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64_mu( @@ -733,7 +733,7 @@ vbool4_t test_vmseq_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmseq_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64_mu( @@ -742,7 +742,7 @@ vbool64_t test_vmseq_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32_mu( @@ -751,7 +751,7 @@ vbool64_t test_vmseq_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmseq_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32_mu( @@ -760,7 +760,7 @@ vbool32_t test_vmseq_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16_mu( @@ -769,7 +769,7 @@ vbool32_t test_vmseq_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint6 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmseq_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16_mu( @@ -778,7 +778,7 @@ vbool16_t test_vmseq_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8_mu( @@ -787,7 +787,7 @@ vbool16_t test_vmseq_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmseq_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8_mu( @@ -796,6 +796,6 @@ vbool8_t test_vmseq_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmseq_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmseq_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsge.c index cf92344b71d9..7da7613f2107 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsge.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsge.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsge_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf8_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmsge_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf4_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmsge_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsge_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf4_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmsge_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf2_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmsge_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, 
size_t vl) { - return vmsge_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf2_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmsge_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m1_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmsge_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsge_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m1_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmsge_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m2_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmsge_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsge_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m2_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmsge_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, 
vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m4_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmsge_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsge_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsge_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m4_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmsge_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsge_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i8m8_b1_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmsge_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsge_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsge_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i8m8_b1_mu( @@ -130,7 +130,7 @@ vbool1_t test_vmsge_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsge_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsge_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vmsge_vv_i16mf4_b64_mu( @@ -139,7 +139,7 @@ vbool1_t test_vmsge_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsge_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf4_b64_mu( @@ -148,7 +148,7 @@ vbool64_t test_vmsge_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf2_b32_mu( @@ -157,7 +157,7 @@ vbool64_t test_vmsge_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsge_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf2_b32_mu( @@ -166,7 +166,7 @@ vbool32_t test_vmsge_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m1_b16_mu( @@ -175,7 +175,7 @@ vbool32_t test_vmsge_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t 
test_vmsge_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsge_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m1_b16_mu( @@ -184,7 +184,7 @@ vbool16_t test_vmsge_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m2_b8_mu( @@ -193,7 +193,7 @@ vbool16_t test_vmsge_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsge_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m2_b8_mu( @@ -202,7 +202,7 @@ vbool8_t test_vmsge_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m4_b4_mu( @@ -211,7 +211,7 @@ vbool8_t test_vmsge_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsge_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmsge_vx_i16m4_b4_mu( @@ -220,7 +220,7 @@ vbool4_t test_vmsge_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i16m8_b2_mu( @@ -229,7 +229,7 @@ vbool4_t test_vmsge_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsge_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsge_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i16m8_b2_mu( @@ -238,7 +238,7 @@ vbool2_t test_vmsge_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsge_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsge_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_mu( @@ -247,7 +247,7 @@ vbool2_t test_vmsge_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsge_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_mu( @@ -256,7 +256,7 @@ vbool64_t test_vmsge_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t 
maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m1_b32_mu( @@ -265,7 +265,7 @@ vbool64_t test_vmsge_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsge_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m1_b32_mu( @@ -274,7 +274,7 @@ vbool32_t test_vmsge_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m2_b16_mu( @@ -283,7 +283,7 @@ vbool32_t test_vmsge_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsge_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m2_b16_mu( @@ -292,7 +292,7 @@ vbool16_t test_vmsge_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsge_vv_i32m4_b8_mu( @@ -301,7 +301,7 @@ vbool16_t test_vmsge_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsge_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m4_b8_mu( @@ -310,7 +310,7 @@ vbool8_t test_vmsge_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i32m8_b4_mu( @@ -319,7 +319,7 @@ vbool8_t test_vmsge_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsge_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i32m8_b4_mu( @@ -328,7 +328,7 @@ vbool4_t test_vmsge_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsge_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsge_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m1_b64_mu( @@ -337,7 +337,7 @@ vbool4_t test_vmsge_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, 
vint64m1_t op2, size_t vl) { - return vmsge_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m1_b64_mu( @@ -346,7 +346,7 @@ vbool64_t test_vmsge_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsge_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m2_b32_mu( @@ -355,7 +355,7 @@ vbool64_t test_vmsge_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsge_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m2_b32_mu( @@ -364,7 +364,7 @@ vbool32_t test_vmsge_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsge_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m4_b16_mu( @@ -373,7 +373,7 @@ vbool32_t test_vmsge_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsge_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m4_b16_mu( @@ -382,7 +382,7 @@ 
vbool16_t test_vmsge_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsge_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vv_i64m8_b8_mu( @@ -391,7 +391,7 @@ vbool16_t test_vmsge_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsge_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsge_vx_i64m8_b8_mu( @@ -400,6 +400,6 @@ vbool8_t test_vmsge_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsge_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsge_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsge_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsgeu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsgeu.c index 3928e59cf396..274cd3f72ae7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsgeu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsgeu.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsgeu_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsgeu_vx_u8mf8_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmsgeu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf4_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmsgeu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsgeu_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf4_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmsgeu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf2_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmsgeu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsgeu_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf2_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmsgeu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u8mf2_b16_mu(vbool16_t mask, 
vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m1_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmsgeu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsgeu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m1_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmsgeu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m2_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmsgeu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsgeu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m2_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmsgeu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m4_b2_mu( @@ -103,7 +103,7 @@ 
vbool4_t test_vmsgeu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgeu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsgeu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m4_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmsgeu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgeu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m8_b1_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmsgeu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgeu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsgeu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m8_b1_mu( @@ -130,7 +130,7 @@ vbool1_t test_vmsgeu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgeu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsgeu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf4_b64_mu( @@ -139,7 +139,7 @@ vbool1_t test_vmsgeu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return 
vmsgeu_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf4_b64_mu( @@ -148,7 +148,7 @@ vbool64_t test_vmsgeu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf2_b32_mu( @@ -157,7 +157,7 @@ vbool64_t test_vmsgeu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsgeu_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf2_b32_mu( @@ -166,7 +166,7 @@ vbool32_t test_vmsgeu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m1_b16_mu( @@ -175,7 +175,7 @@ vbool32_t test_vmsgeu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsgeu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m1_b16_mu( @@ -184,7 
+184,7 @@ vbool16_t test_vmsgeu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m2_b8_mu( @@ -193,7 +193,7 @@ vbool16_t test_vmsgeu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsgeu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m2_b8_mu( @@ -202,7 +202,7 @@ vbool8_t test_vmsgeu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m4_b4_mu( @@ -211,7 +211,7 @@ vbool8_t test_vmsgeu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsgeu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m4_b4_mu( @@ -220,7 +220,7 @@ vbool4_t test_vmsgeu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t 
vl) { - return vmsgeu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m8_b2_mu( @@ -229,7 +229,7 @@ vbool4_t test_vmsgeu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgeu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsgeu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m8_b2_mu( @@ -238,7 +238,7 @@ vbool2_t test_vmsgeu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgeu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsgeu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_mu( @@ -247,7 +247,7 @@ vbool2_t test_vmsgeu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsgeu_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_mu( @@ -256,7 +256,7 @@ vbool64_t test_vmsgeu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m1_b32_mu( @@ -265,7 +265,7 @@ 
vbool64_t test_vmsgeu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsgeu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m1_b32_mu( @@ -274,7 +274,7 @@ vbool32_t test_vmsgeu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m2_b16_mu( @@ -283,7 +283,7 @@ vbool32_t test_vmsgeu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsgeu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m2_b16_mu( @@ -292,7 +292,7 @@ vbool16_t test_vmsgeu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m4_b8_mu( @@ -301,7 +301,7 @@ vbool16_t test_vmsgeu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, 
vuint32m4_t op2, size_t vl) { - return vmsgeu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m4_b8_mu( @@ -310,7 +310,7 @@ vbool8_t test_vmsgeu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m8_b4_mu( @@ -319,7 +319,7 @@ vbool8_t test_vmsgeu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsgeu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m8_b4_mu( @@ -328,7 +328,7 @@ vbool4_t test_vmsgeu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsgeu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m1_b64_mu( @@ -337,7 +337,7 @@ vbool4_t test_vmsgeu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsgeu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m1_b64_mu( @@ -346,7 +346,7 
@@ vbool64_t test_vmsgeu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m2_b32_mu( @@ -355,7 +355,7 @@ vbool64_t test_vmsgeu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsgeu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m2_b32_mu( @@ -364,7 +364,7 @@ vbool32_t test_vmsgeu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m4_b16_mu( @@ -373,7 +373,7 @@ vbool32_t test_vmsgeu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsgeu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m4_b16_mu( @@ -382,7 +382,7 @@ vbool16_t test_vmsgeu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, 
uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m8_b8_mu( @@ -391,7 +391,7 @@ vbool16_t test_vmsgeu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsgeu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m8_b8_mu( @@ -400,6 +400,6 @@ vbool8_t test_vmsgeu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsgeu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgeu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsgt.c index 9deb92cdb151..8c6d11da6ff2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsgt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsgt_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmsgt_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i8mf8_b64_mu(vbool64_t mask, 
vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf4_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmsgt_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsgt_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmsgt_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf2_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmsgt_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsgt_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmsgt_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m1_b8_mu( 
@@ -67,7 +67,7 @@ vbool16_t test_vmsgt_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsgt_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmsgt_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m2_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmsgt_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsgt_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmsgt_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m4_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmsgt_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsgt_vv_i8m4_b2_mu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vmsgt_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmsgt_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m8_b1_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmsgt_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgt_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsgt_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1_mu( @@ -130,7 +130,7 @@ vbool1_t test_vmsgt_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgt_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsgt_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf4_b64_mu( @@ -139,7 +139,7 @@ vbool1_t test_vmsgt_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsgt_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64_mu( @@ -148,7 +148,7 @@ vbool64_t test_vmsgt_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] 
// vbool64_t test_vmsgt_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf2_b32_mu( @@ -157,7 +157,7 @@ vbool64_t test_vmsgt_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsgt_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32_mu( @@ -166,7 +166,7 @@ vbool32_t test_vmsgt_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m1_b16_mu( @@ -175,7 +175,7 @@ vbool32_t test_vmsgt_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsgt_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16_mu( @@ -184,7 +184,7 @@ vbool16_t test_vmsgt_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmsgt_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m2_b8_mu( @@ -193,7 +193,7 @@ vbool16_t test_vmsgt_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsgt_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8_mu( @@ -202,7 +202,7 @@ vbool8_t test_vmsgt_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m4_b4_mu( @@ -211,7 +211,7 @@ vbool8_t test_vmsgt_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsgt_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4_mu( @@ -220,7 +220,7 @@ vbool4_t test_vmsgt_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m8_b2_mu( @@ -229,7 +229,7 @@ vbool4_t test_vmsgt_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t 
test_vmsgt_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsgt_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2_mu( @@ -238,7 +238,7 @@ vbool2_t test_vmsgt_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsgt_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_mu( @@ -247,7 +247,7 @@ vbool2_t test_vmsgt_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsgt_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_mu( @@ -256,7 +256,7 @@ vbool64_t test_vmsgt_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m1_b32_mu( @@ -265,7 +265,7 @@ vbool64_t test_vmsgt_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsgt_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32_mu( @@ -274,7 +274,7 @@ vbool32_t test_vmsgt_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m2_b16_mu( @@ -283,7 +283,7 @@ vbool32_t test_vmsgt_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsgt_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16_mu( @@ -292,7 +292,7 @@ vbool16_t test_vmsgt_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m4_b8_mu( @@ -301,7 +301,7 @@ vbool16_t test_vmsgt_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsgt_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8_mu( @@ -310,7 +310,7 @@ vbool8_t test_vmsgt_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i32m4_b8_mu(vbool8_t mask, 
vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m8_b4_mu( @@ -319,7 +319,7 @@ vbool8_t test_vmsgt_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsgt_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4_mu( @@ -328,7 +328,7 @@ vbool4_t test_vmsgt_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsgt_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m1_b64_mu( @@ -337,7 +337,7 @@ vbool4_t test_vmsgt_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsgt_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64_mu( @@ -346,7 +346,7 @@ vbool64_t test_vmsgt_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m2_b32_mu( @@ 
-355,7 +355,7 @@ vbool64_t test_vmsgt_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsgt_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32_mu( @@ -364,7 +364,7 @@ vbool32_t test_vmsgt_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m4_b16_mu( @@ -373,7 +373,7 @@ vbool32_t test_vmsgt_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsgt_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16_mu( @@ -382,7 +382,7 @@ vbool16_t test_vmsgt_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m8_b8_mu( @@ -391,7 +391,7 @@ vbool16_t test_vmsgt_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, 
size_t vl) { - return vmsgt_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8_mu( @@ -400,6 +400,6 @@ vbool8_t test_vmsgt_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsgt_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgt_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsgtu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsgtu.c index 9bbb1f825008..668252c0f0ed 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsgtu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsgtu.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsgtu_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmsgtu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf4_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmsgtu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t 
maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsgtu_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmsgtu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf2_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmsgtu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsgtu_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmsgtu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m1_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmsgtu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsgtu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsgtu_vx_u8m1_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmsgtu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m2_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmsgtu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsgtu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmsgtu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m4_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmsgtu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsgtu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmsgtu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t 
vl) { - return vmsgtu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m8_b1_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmsgtu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgtu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsgtu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1_mu( @@ -130,7 +130,7 @@ vbool1_t test_vmsgtu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgtu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsgtu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf4_b64_mu( @@ -139,7 +139,7 @@ vbool1_t test_vmsgtu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsgtu_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64_mu( @@ -148,7 +148,7 @@ vbool64_t test_vmsgtu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf2_b32_mu( @@ -157,7 +157,7 @@ vbool64_t 
test_vmsgtu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsgtu_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32_mu( @@ -166,7 +166,7 @@ vbool32_t test_vmsgtu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m1_b16_mu( @@ -175,7 +175,7 @@ vbool32_t test_vmsgtu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsgtu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16_mu( @@ -184,7 +184,7 @@ vbool16_t test_vmsgtu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m2_b8_mu( @@ -193,7 +193,7 @@ vbool16_t test_vmsgtu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, 
vuint16m2_t op2, size_t vl) { - return vmsgtu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8_mu( @@ -202,7 +202,7 @@ vbool8_t test_vmsgtu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m4_b4_mu( @@ -211,7 +211,7 @@ vbool8_t test_vmsgtu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsgtu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4_mu( @@ -220,7 +220,7 @@ vbool4_t test_vmsgtu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m8_b2_mu( @@ -229,7 +229,7 @@ vbool4_t test_vmsgtu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsgtu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2_mu( @@ -238,7 +238,7 @@ 
vbool2_t test_vmsgtu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsgtu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_mu( @@ -247,7 +247,7 @@ vbool2_t test_vmsgtu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsgtu_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_mu( @@ -256,7 +256,7 @@ vbool64_t test_vmsgtu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m1_b32_mu( @@ -265,7 +265,7 @@ vbool64_t test_vmsgtu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsgtu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32_mu( @@ -274,7 +274,7 @@ vbool32_t test_vmsgtu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, 
uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m2_b16_mu( @@ -283,7 +283,7 @@ vbool32_t test_vmsgtu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsgtu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16_mu( @@ -292,7 +292,7 @@ vbool16_t test_vmsgtu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m4_b8_mu( @@ -301,7 +301,7 @@ vbool16_t test_vmsgtu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsgtu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8_mu( @@ -310,7 +310,7 @@ vbool8_t test_vmsgtu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m8_b4_mu( @@ -319,7 
+319,7 @@ vbool8_t test_vmsgtu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsgtu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4_mu( @@ -328,7 +328,7 @@ vbool4_t test_vmsgtu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsgtu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m1_b64_mu( @@ -337,7 +337,7 @@ vbool4_t test_vmsgtu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsgtu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64_mu( @@ -346,7 +346,7 @@ vbool64_t test_vmsgtu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m2_b32_mu( @@ -355,7 +355,7 @@ vbool64_t test_vmsgtu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, 
vuint64m2_t op2, size_t vl) { - return vmsgtu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32_mu( @@ -364,7 +364,7 @@ vbool32_t test_vmsgtu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m4_b16_mu( @@ -373,7 +373,7 @@ vbool32_t test_vmsgtu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsgtu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16_mu( @@ -382,7 +382,7 @@ vbool16_t test_vmsgtu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m8_b8_mu( @@ -391,7 +391,7 @@ vbool16_t test_vmsgtu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsgtu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8_mu( @@ 
-400,6 +400,6 @@ vbool8_t test_vmsgtu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsgtu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsgtu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsif.c index 4b28f6d17352..5d7040ea4ea4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsif.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsif.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsif_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { - return vmsif_m_b1_mu(mask, maskedoff, op1, vl); + return __riscv_vmsif_m_b1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b2_mu( @@ -21,7 +21,7 @@ vbool1_t test_vmsif_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsif_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { - return vmsif_m_b2_mu(mask, maskedoff, op1, vl); + return __riscv_vmsif_m_b2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b4_mu( @@ -30,7 +30,7 @@ vbool2_t test_vmsif_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsif_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { - return vmsif_m_b4_mu(mask, maskedoff, op1, vl); + return __riscv_vmsif_m_b4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b8_mu( @@ -39,7 +39,7 @@ vbool4_t test_vmsif_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t 
test_vmsif_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { - return vmsif_m_b8_mu(mask, maskedoff, op1, vl); + return __riscv_vmsif_m_b8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b16_mu( @@ -48,7 +48,7 @@ vbool8_t test_vmsif_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsif_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { - return vmsif_m_b16_mu(mask, maskedoff, op1, vl); + return __riscv_vmsif_m_b16_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b32_mu( @@ -57,7 +57,7 @@ vbool16_t test_vmsif_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsif_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { - return vmsif_m_b32_mu(mask, maskedoff, op1, vl); + return __riscv_vmsif_m_b32_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsif_m_b64_mu( @@ -66,6 +66,6 @@ vbool32_t test_vmsif_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsif_m_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { - return vmsif_m_b64_mu(mask, maskedoff, op1, vl); + return __riscv_vmsif_m_b64_mu(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsle.c index e7015558cfad..4c4fd545f69e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsle.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsle_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmsle_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmsle_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmsle_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsle_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmsle_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmsle_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsle_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmsle_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] 
// vbool16_t test_vmsle_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmsle_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsle_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmsle_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmsle_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsle_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmsle_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsle_vv_i8m4_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmsle_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsle_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmsle_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmsle_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsle_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsle_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_mu( @@ -130,7 +130,7 @@ vbool1_t test_vmsle_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsle_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsle_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_mu( @@ -139,7 +139,7 @@ vbool1_t test_vmsle_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { 
- return vmsle_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_mu( @@ -148,7 +148,7 @@ vbool64_t test_vmsle_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_mu( @@ -157,7 +157,7 @@ vbool64_t test_vmsle_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsle_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_mu( @@ -166,7 +166,7 @@ vbool32_t test_vmsle_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_mu( @@ -175,7 +175,7 @@ vbool32_t test_vmsle_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsle_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_mu( @@ -184,7 +184,7 @@ vbool16_t 
test_vmsle_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_mu( @@ -193,7 +193,7 @@ vbool16_t test_vmsle_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsle_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_mu( @@ -202,7 +202,7 @@ vbool8_t test_vmsle_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_mu( @@ -211,7 +211,7 @@ vbool8_t test_vmsle_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsle_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_mu( @@ -220,7 +220,7 @@ vbool4_t test_vmsle_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m4_b4_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_mu( @@ -229,7 +229,7 @@ vbool4_t test_vmsle_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsle_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_mu( @@ -238,7 +238,7 @@ vbool2_t test_vmsle_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsle_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_mu( @@ -247,7 +247,7 @@ vbool2_t test_vmsle_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmsle_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_mu( @@ -256,7 +256,7 @@ vbool64_t test_vmsle_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_mu( @@ -265,7 +265,7 @@ vbool64_t test_vmsle_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t 
maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsle_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_mu( @@ -274,7 +274,7 @@ vbool32_t test_vmsle_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_mu( @@ -283,7 +283,7 @@ vbool32_t test_vmsle_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsle_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_mu( @@ -292,7 +292,7 @@ vbool16_t test_vmsle_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_mu( @@ -301,7 +301,7 @@ vbool16_t test_vmsle_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsle_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmsle_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_mu( @@ -310,7 +310,7 @@ vbool8_t test_vmsle_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_mu( @@ -319,7 +319,7 @@ vbool8_t test_vmsle_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsle_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_mu( @@ -328,7 +328,7 @@ vbool4_t test_vmsle_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmsle_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_mu( @@ -337,7 +337,7 @@ vbool4_t test_vmsle_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsle_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_mu( @@ -346,7 +346,7 @@ vbool64_t test_vmsle_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool64_t test_vmsle_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsle_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_mu( @@ -355,7 +355,7 @@ vbool64_t test_vmsle_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsle_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_mu( @@ -364,7 +364,7 @@ vbool32_t test_vmsle_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsle_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_mu( @@ -373,7 +373,7 @@ vbool32_t test_vmsle_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsle_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_mu( @@ -382,7 +382,7 @@ vbool16_t test_vmsle_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsle_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i64m4_b16_mu(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_mu( @@ -391,7 +391,7 @@ vbool16_t test_vmsle_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsle_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_mu( @@ -400,6 +400,6 @@ vbool8_t test_vmsle_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsle_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsle_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsleu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsleu.c index a51c84c3852d..7766f32ac68c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsleu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsleu.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsleu_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmsleu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmsleu_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmsleu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsleu_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmsleu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmsleu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsleu_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmsleu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmsleu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsleu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmsleu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmsleu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsleu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmsleu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmsleu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsleu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u8m4_b2_mu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmsleu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmsleu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsleu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsleu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_mu( @@ -130,7 +130,7 @@ vbool1_t test_vmsleu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsleu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsleu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_mu( @@ -139,7 +139,7 @@ vbool1_t test_vmsleu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsleu_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_mu( @@ -148,7 +148,7 @@ vbool64_t test_vmsleu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t 
test_vmsleu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_mu( @@ -157,7 +157,7 @@ vbool64_t test_vmsleu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsleu_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_mu( @@ -166,7 +166,7 @@ vbool32_t test_vmsleu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_mu( @@ -175,7 +175,7 @@ vbool32_t test_vmsleu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsleu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_mu( @@ -184,7 +184,7 @@ vbool16_t test_vmsleu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmsleu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_mu( @@ -193,7 +193,7 @@ vbool16_t test_vmsleu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsleu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_mu( @@ -202,7 +202,7 @@ vbool8_t test_vmsleu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4_mu( @@ -211,7 +211,7 @@ vbool8_t test_vmsleu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsleu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4_mu( @@ -220,7 +220,7 @@ vbool4_t test_vmsleu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_mu( @@ -229,7 +229,7 @@ vbool4_t test_vmsleu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vbool2_t test_vmsleu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsleu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_mu( @@ -238,7 +238,7 @@ vbool2_t test_vmsleu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsleu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_mu( @@ -247,7 +247,7 @@ vbool2_t test_vmsleu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsleu_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_mu( @@ -256,7 +256,7 @@ vbool64_t test_vmsleu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_mu( @@ -265,7 +265,7 @@ vbool64_t test_vmsleu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsleu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmsleu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_mu( @@ -274,7 +274,7 @@ vbool32_t test_vmsleu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_mu( @@ -283,7 +283,7 @@ vbool32_t test_vmsleu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsleu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_mu( @@ -292,7 +292,7 @@ vbool16_t test_vmsleu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_mu( @@ -301,7 +301,7 @@ vbool16_t test_vmsleu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsleu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_mu( @@ -310,7 +310,7 @@ vbool8_t test_vmsleu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_mu( @@ -319,7 +319,7 @@ vbool8_t test_vmsleu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsleu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_mu( @@ -328,7 +328,7 @@ vbool4_t test_vmsleu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsleu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_mu( @@ -337,7 +337,7 @@ vbool4_t test_vmsleu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsleu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_mu( @@ -346,7 +346,7 @@ vbool64_t test_vmsleu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmsleu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_mu( @@ -355,7 +355,7 @@ vbool64_t test_vmsleu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsleu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_mu( @@ -364,7 +364,7 @@ vbool32_t test_vmsleu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_mu( @@ -373,7 +373,7 @@ vbool32_t test_vmsleu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsleu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_mu( @@ -382,7 +382,7 @@ vbool16_t test_vmsleu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_mu( @@ -391,7 +391,7 @@ vbool16_t test_vmsleu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, 
vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsleu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_mu( @@ -400,6 +400,6 @@ vbool8_t test_vmsleu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsleu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsleu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmslt.c index f3fb1e1d08c3..f440d6949ed7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmslt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmslt.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmslt_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmslt_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmslt_vx_i8mf8_b64_mu(vbool64_t 
mask, vbool64_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmslt_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmslt_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmslt_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmslt_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmslt_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmslt_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmslt_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vmslt_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmslt_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmslt_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmslt_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmslt_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmslt_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmslt_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmslt_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t 
test_vmslt_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmslt_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmslt_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmslt_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1_mu( @@ -130,7 +130,7 @@ vbool1_t test_vmslt_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmslt_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmslt_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64_mu( @@ -139,7 +139,7 @@ vbool1_t test_vmslt_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmslt_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64_mu( @@ -148,7 +148,7 @@ vbool64_t test_vmslt_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmslt_vv_i16mf2_b32_mu( @@ -157,7 +157,7 @@ vbool64_t test_vmslt_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmslt_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32_mu( @@ -166,7 +166,7 @@ vbool32_t test_vmslt_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16_mu( @@ -175,7 +175,7 @@ vbool32_t test_vmslt_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmslt_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16_mu( @@ -184,7 +184,7 @@ vbool16_t test_vmslt_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8_mu( @@ -193,7 +193,7 @@ vbool16_t test_vmslt_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t 
maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmslt_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8_mu( @@ -202,7 +202,7 @@ vbool8_t test_vmslt_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4_mu( @@ -211,7 +211,7 @@ vbool8_t test_vmslt_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmslt_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4_mu( @@ -220,7 +220,7 @@ vbool4_t test_vmslt_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2_mu( @@ -229,7 +229,7 @@ vbool4_t test_vmslt_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmslt_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2_mu( @@ -238,7 +238,7 @@ 
vbool2_t test_vmslt_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmslt_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_mu( @@ -247,7 +247,7 @@ vbool2_t test_vmslt_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmslt_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_mu( @@ -256,7 +256,7 @@ vbool64_t test_vmslt_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32_mu( @@ -265,7 +265,7 @@ vbool64_t test_vmslt_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmslt_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32_mu( @@ -274,7 +274,7 @@ vbool32_t test_vmslt_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { 
- return vmslt_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16_mu( @@ -283,7 +283,7 @@ vbool32_t test_vmslt_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmslt_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16_mu( @@ -292,7 +292,7 @@ vbool16_t test_vmslt_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8_mu( @@ -301,7 +301,7 @@ vbool16_t test_vmslt_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmslt_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8_mu( @@ -310,7 +310,7 @@ vbool8_t test_vmslt_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4_mu( @@ -319,7 +319,7 @@ vbool8_t test_vmslt_vx_i32m4_b8_mu(vbool8_t 
mask, vbool8_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmslt_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4_mu( @@ -328,7 +328,7 @@ vbool4_t test_vmslt_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmslt_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64_mu( @@ -337,7 +337,7 @@ vbool4_t test_vmslt_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmslt_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64_mu( @@ -346,7 +346,7 @@ vbool64_t test_vmslt_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32_mu( @@ -355,7 +355,7 @@ vbool64_t test_vmslt_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmslt_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vmslt_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32_mu( @@ -364,7 +364,7 @@ vbool32_t test_vmslt_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16_mu( @@ -373,7 +373,7 @@ vbool32_t test_vmslt_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmslt_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16_mu( @@ -382,7 +382,7 @@ vbool16_t test_vmslt_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8_mu( @@ -391,7 +391,7 @@ vbool16_t test_vmslt_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmslt_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8_mu( @@ -400,6 +400,6 @@ vbool8_t test_vmslt_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmslt_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmslt_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsltu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsltu.c index 60acf14d1c10..1c14767e0b05 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsltu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsltu.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmsltu_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmsltu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmsltu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsltu_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32_mu( @@ -40,7 +40,7 @@ vbool32_t 
test_vmsltu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmsltu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsltu_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmsltu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmsltu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsltu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmsltu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return 
vmsltu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmsltu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsltu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmsltu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmsltu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsltu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmsltu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmsltu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, 
vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsltu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsltu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1_mu( @@ -130,7 +130,7 @@ vbool1_t test_vmsltu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsltu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsltu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64_mu( @@ -139,7 +139,7 @@ vbool1_t test_vmsltu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsltu_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64_mu( @@ -148,7 +148,7 @@ vbool64_t test_vmsltu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32_mu( @@ -157,7 +157,7 @@ vbool64_t test_vmsltu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsltu_vv_u16mf2_b32_mu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vmsltu_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32_mu( @@ -166,7 +166,7 @@ vbool32_t test_vmsltu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16_mu( @@ -175,7 +175,7 @@ vbool32_t test_vmsltu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsltu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16_mu( @@ -184,7 +184,7 @@ vbool16_t test_vmsltu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8_mu( @@ -193,7 +193,7 @@ vbool16_t test_vmsltu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsltu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8_mu( @@ -202,7 +202,7 @@ vbool8_t test_vmsltu_vv_u16m2_b8_mu(vbool8_t mask, 
vbool8_t maskedoff, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4_mu( @@ -211,7 +211,7 @@ vbool8_t test_vmsltu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsltu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4_mu( @@ -220,7 +220,7 @@ vbool4_t test_vmsltu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2_mu( @@ -229,7 +229,7 @@ vbool4_t test_vmsltu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsltu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2_mu( @@ -238,7 +238,7 @@ vbool2_t test_vmsltu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsltu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vmsltu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_mu( @@ -247,7 +247,7 @@ vbool2_t test_vmsltu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsltu_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_mu( @@ -256,7 +256,7 @@ vbool64_t test_vmsltu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32_mu( @@ -265,7 +265,7 @@ vbool64_t test_vmsltu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsltu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32_mu( @@ -274,7 +274,7 @@ vbool32_t test_vmsltu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16_mu( @@ -283,7 +283,7 @@ vbool32_t test_vmsltu_vx_u32m1_b32_mu(vbool32_t mask, 
vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsltu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16_mu( @@ -292,7 +292,7 @@ vbool16_t test_vmsltu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8_mu( @@ -301,7 +301,7 @@ vbool16_t test_vmsltu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsltu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8_mu( @@ -310,7 +310,7 @@ vbool8_t test_vmsltu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4_mu( @@ -319,7 +319,7 @@ vbool8_t test_vmsltu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsltu_vv_u32m8_b4_mu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vmsltu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4_mu( @@ -328,7 +328,7 @@ vbool4_t test_vmsltu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsltu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64_mu( @@ -337,7 +337,7 @@ vbool4_t test_vmsltu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsltu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64_mu( @@ -346,7 +346,7 @@ vbool64_t test_vmsltu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32_mu( @@ -355,7 +355,7 @@ vbool64_t test_vmsltu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsltu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32_mu( @@ -364,7 +364,7 @@ vbool32_t test_vmsltu_vv_u64m2_b32_mu(vbool32_t mask, 
vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16_mu( @@ -373,7 +373,7 @@ vbool32_t test_vmsltu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsltu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16_mu( @@ -382,7 +382,7 @@ vbool16_t test_vmsltu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8_mu( @@ -391,7 +391,7 @@ vbool16_t test_vmsltu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsltu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsltu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8_mu( @@ -400,6 +400,6 @@ vbool8_t test_vmsltu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsltu_vx_u64m8_b8_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vmsltu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsne.c index 8e907a9e5efb..a139e9492fce 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsne.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmsne_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64_mu( @@ -22,7 +22,7 @@ vbool64_t test_vmsne_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32_mu( @@ -31,7 +31,7 @@ vbool64_t test_vmsne_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmsne_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32_mu( @@ -40,7 +40,7 @@ vbool32_t test_vmsne_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return 
vmsne_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16_mu( @@ -49,7 +49,7 @@ vbool32_t test_vmsne_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmsne_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16_mu( @@ -58,7 +58,7 @@ vbool16_t test_vmsne_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8_mu( @@ -67,7 +67,7 @@ vbool16_t test_vmsne_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmsne_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8_mu( @@ -76,7 +76,7 @@ vbool8_t test_vmsne_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4_mu( @@ -85,7 +85,7 @@ vbool8_t test_vmsne_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t 
o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmsne_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4_mu( @@ -94,7 +94,7 @@ vbool4_t test_vmsne_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2_mu( @@ -103,7 +103,7 @@ vbool4_t test_vmsne_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmsne_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2_mu( @@ -112,7 +112,7 @@ vbool2_t test_vmsne_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1_mu( @@ -121,7 +121,7 @@ vbool2_t test_vmsne_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmsne_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1_mu( @@ -130,7 +130,7 @@ vbool1_t test_vmsne_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmsne_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64_mu( @@ -139,7 +139,7 @@ vbool1_t test_vmsne_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmsne_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64_mu( @@ -148,7 +148,7 @@ vbool64_t test_vmsne_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32_mu( @@ -157,7 +157,7 @@ vbool64_t test_vmsne_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmsne_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32_mu( @@ -166,7 +166,7 @@ vbool32_t test_vmsne_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i16mf2_b32_mu(vbool32_t 
mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16_mu( @@ -175,7 +175,7 @@ vbool32_t test_vmsne_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmsne_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16_mu( @@ -184,7 +184,7 @@ vbool16_t test_vmsne_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8_mu( @@ -193,7 +193,7 @@ vbool16_t test_vmsne_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmsne_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8_mu( @@ -202,7 +202,7 @@ vbool8_t test_vmsne_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmsne_vv_i16m4_b4_mu( @@ -211,7 +211,7 @@ vbool8_t test_vmsne_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmsne_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4_mu( @@ -220,7 +220,7 @@ vbool4_t test_vmsne_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2_mu( @@ -229,7 +229,7 @@ vbool4_t test_vmsne_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmsne_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2_mu( @@ -238,7 +238,7 @@ vbool2_t test_vmsne_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmsne_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_mu( @@ -247,7 +247,7 @@ vbool2_t test_vmsne_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, 
vint32mf2_t op2, size_t vl) { - return vmsne_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_mu( @@ -256,7 +256,7 @@ vbool64_t test_vmsne_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32_mu( @@ -265,7 +265,7 @@ vbool64_t test_vmsne_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmsne_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32_mu( @@ -274,7 +274,7 @@ vbool32_t test_vmsne_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16_mu( @@ -283,7 +283,7 @@ vbool32_t test_vmsne_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmsne_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16_mu( @@ -292,7 
+292,7 @@ vbool16_t test_vmsne_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8_mu( @@ -301,7 +301,7 @@ vbool16_t test_vmsne_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmsne_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8_mu( @@ -310,7 +310,7 @@ vbool8_t test_vmsne_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmsne_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4_mu( @@ -319,7 +319,7 @@ vbool8_t test_vmsne_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmsne_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4_mu( @@ -328,7 +328,7 @@ vbool4_t test_vmsne_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return 
vmsne_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64_mu( @@ -337,7 +337,7 @@ vbool4_t test_vmsne_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmsne_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64_mu( @@ -346,7 +346,7 @@ vbool64_t test_vmsne_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32_mu( @@ -355,7 +355,7 @@ vbool64_t test_vmsne_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmsne_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32_mu( @@ -364,7 +364,7 @@ vbool32_t test_vmsne_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16_mu( @@ -373,7 +373,7 @@ vbool32_t 
test_vmsne_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmsne_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16_mu( @@ -382,7 +382,7 @@ vbool16_t test_vmsne_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8_mu( @@ -391,7 +391,7 @@ vbool16_t test_vmsne_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmsne_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8_mu( @@ -400,7 +400,7 @@ vbool8_t test_vmsne_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmsne_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64_mu( @@ -409,7 +409,7 @@ vbool8_t test_vmsne_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return 
vmsne_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64_mu( @@ -418,7 +418,7 @@ vbool64_t test_vmsne_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32_mu( @@ -427,7 +427,7 @@ vbool64_t test_vmsne_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmsne_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32_mu( @@ -436,7 +436,7 @@ vbool32_t test_vmsne_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16_mu( @@ -445,7 +445,7 @@ vbool32_t test_vmsne_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmsne_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16_mu( @@ -454,7 +454,7 @@ vbool16_t 
test_vmsne_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8_mu( @@ -463,7 +463,7 @@ vbool16_t test_vmsne_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmsne_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8_mu( @@ -472,7 +472,7 @@ vbool8_t test_vmsne_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4_mu( @@ -481,7 +481,7 @@ vbool8_t test_vmsne_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmsne_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4_mu( @@ -490,7 +490,7 @@ vbool4_t test_vmsne_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vmsne_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2_mu( @@ -499,7 +499,7 @@ vbool4_t test_vmsne_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmsne_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2_mu( @@ -508,7 +508,7 @@ vbool2_t test_vmsne_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1_mu( @@ -517,7 +517,7 @@ vbool2_t test_vmsne_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmsne_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1_mu( @@ -526,7 +526,7 @@ vbool1_t test_vmsne_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmsne_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64_mu( @@ -535,7 +535,7 @@ vbool1_t test_vmsne_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t 
test_vmsne_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmsne_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64_mu( @@ -544,7 +544,7 @@ vbool64_t test_vmsne_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32_mu( @@ -553,7 +553,7 @@ vbool64_t test_vmsne_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmsne_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32_mu( @@ -562,7 +562,7 @@ vbool32_t test_vmsne_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16_mu( @@ -571,7 +571,7 @@ vbool32_t test_vmsne_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmsne_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmsne_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16_mu( @@ -580,7 +580,7 @@ vbool16_t test_vmsne_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8_mu( @@ -589,7 +589,7 @@ vbool16_t test_vmsne_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmsne_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8_mu( @@ -598,7 +598,7 @@ vbool8_t test_vmsne_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4_mu( @@ -607,7 +607,7 @@ vbool8_t test_vmsne_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmsne_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4_mu( @@ -616,7 +616,7 @@ vbool4_t test_vmsne_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool4_t test_vmsne_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2_mu( @@ -625,7 +625,7 @@ vbool4_t test_vmsne_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmsne_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2_mu( @@ -634,7 +634,7 @@ vbool2_t test_vmsne_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmsne_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_mu( @@ -643,7 +643,7 @@ vbool2_t test_vmsne_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmsne_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_mu( @@ -652,7 +652,7 @@ vbool64_t test_vmsne_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u32mf2_b64_mu(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32_mu( @@ -661,7 +661,7 @@ vbool64_t test_vmsne_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmsne_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32_mu( @@ -670,7 +670,7 @@ vbool32_t test_vmsne_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16_mu( @@ -679,7 +679,7 @@ vbool32_t test_vmsne_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmsne_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16_mu( @@ -688,7 +688,7 @@ vbool16_t test_vmsne_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8_mu( @@ -697,7 +697,7 @@ vbool16_t test_vmsne_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t 
test_vmsne_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmsne_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8_mu( @@ -706,7 +706,7 @@ vbool8_t test_vmsne_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4_mu( @@ -715,7 +715,7 @@ vbool8_t test_vmsne_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmsne_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4_mu( @@ -724,7 +724,7 @@ vbool4_t test_vmsne_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmsne_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64_mu( @@ -733,7 +733,7 @@ vbool4_t test_vmsne_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmsne_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64_mu( @@ -742,7 +742,7 @@ vbool64_t test_vmsne_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32_mu( @@ -751,7 +751,7 @@ vbool64_t test_vmsne_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmsne_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32_mu( @@ -760,7 +760,7 @@ vbool32_t test_vmsne_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16_mu( @@ -769,7 +769,7 @@ vbool32_t test_vmsne_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmsne_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16_mu( @@ -778,7 +778,7 @@ vbool16_t test_vmsne_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t 
test_vmsne_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8_mu( @@ -787,7 +787,7 @@ vbool16_t test_vmsne_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmsne_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8_mu( @@ -796,6 +796,6 @@ vbool8_t test_vmsne_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmsne_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmsne_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsof.c index d760538a63fd..f12153203ef5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsof.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmsof.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsof_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { - return vmsof_m_b1_mu(mask, maskedoff, op1, vl); + return __riscv_vmsof_m_b1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b2_mu( @@ -21,7 +21,7 @@ vbool1_t test_vmsof_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsof_m_b2_mu(vbool2_t mask, 
vbool2_t maskedoff, vbool2_t op1, size_t vl) { - return vmsof_m_b2_mu(mask, maskedoff, op1, vl); + return __riscv_vmsof_m_b2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b4_mu( @@ -30,7 +30,7 @@ vbool2_t test_vmsof_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsof_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { - return vmsof_m_b4_mu(mask, maskedoff, op1, vl); + return __riscv_vmsof_m_b4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b8_mu( @@ -39,7 +39,7 @@ vbool4_t test_vmsof_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsof_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { - return vmsof_m_b8_mu(mask, maskedoff, op1, vl); + return __riscv_vmsof_m_b8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b16_mu( @@ -48,7 +48,7 @@ vbool8_t test_vmsof_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsof_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { - return vmsof_m_b16_mu(mask, maskedoff, op1, vl); + return __riscv_vmsof_m_b16_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b32_mu( @@ -57,7 +57,7 @@ vbool16_t test_vmsof_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsof_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { - return vmsof_m_b32_mu(mask, maskedoff, op1, vl); + return __riscv_vmsof_m_b32_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vmsof_m_b64_mu( @@ -66,6 +66,6 @@ vbool32_t test_vmsof_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsof_m_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { - return vmsof_m_b64_mu(mask, maskedoff, op1, 
vl); + return __riscv_vmsof_m_b64_mu(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmul.c index e27248b24380..8b590a73ac09 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmul.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmul_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmul_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t 
test_vmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmul_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmul_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmul_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, 
vint8m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmul_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmul_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmul_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmul_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmul_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m1_t test_vmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmul_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmul_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmul_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmul_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmul_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmul_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmul_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmul_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmul_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmul_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmul_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmul_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_tu( @@ -408,7 +408,7 @@ vint64m8_t test_vmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vmul_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmul_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_tu( @@ -417,7 +417,7 @@ vuint8mf8_t test_vmul_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_tu( @@ -426,7 +426,7 @@ vuint8mf8_t test_vmul_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmul_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_tu( @@ -435,7 +435,7 @@ vuint8mf4_t test_vmul_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_tu( @@ -444,7 +444,7 @@ vuint8mf4_t test_vmul_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmul_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_tu( @@ -453,7 +453,7 @@ vuint8mf2_t test_vmul_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vmul_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_tu( @@ -462,7 +462,7 @@ vuint8mf2_t test_vmul_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmul_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m1_tu( @@ -471,7 +471,7 @@ vuint8m1_t test_vmul_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_tu( @@ -480,7 +480,7 @@ vuint8m1_t test_vmul_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmul_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_tu( @@ -489,7 +489,7 @@ vuint8m2_t test_vmul_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_tu( @@ -498,7 +498,7 @@ vuint8m2_t test_vmul_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4_tu(vuint8m4_t maskedoff, 
vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmul_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_tu( @@ -507,7 +507,7 @@ vuint8m4_t test_vmul_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m8_tu( @@ -516,7 +516,7 @@ vuint8m4_t test_vmul_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmul_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_tu( @@ -525,7 +525,7 @@ vuint8m8_t test_vmul_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_tu( @@ -534,7 +534,7 @@ vuint8m8_t test_vmul_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmul_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_tu( @@ -543,7 +543,7 @@ vuint16mf4_t test_vmul_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, 
uint16_t op2, size_t vl) { - return vmul_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_tu( @@ -552,7 +552,7 @@ vuint16mf4_t test_vmul_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmul_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_tu( @@ -561,7 +561,7 @@ vuint16mf2_t test_vmul_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m1_tu( @@ -570,7 +570,7 @@ vuint16mf2_t test_vmul_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmul_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_tu( @@ -579,7 +579,7 @@ vuint16m1_t test_vmul_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_tu( @@ -588,7 +588,7 @@ vuint16m1_t test_vmul_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2_tu(vuint16m2_t maskedoff, 
vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmul_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_tu( @@ -597,7 +597,7 @@ vuint16m2_t test_vmul_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_tu( @@ -606,7 +606,7 @@ vuint16m2_t test_vmul_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmul_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_tu( @@ -615,7 +615,7 @@ vuint16m4_t test_vmul_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_tu( @@ -624,7 +624,7 @@ vuint16m4_t test_vmul_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmul_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_tu( @@ -633,7 +633,7 @@ vuint16m8_t test_vmul_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8_tu(vuint16m8_t maskedoff, 
vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tu( @@ -642,7 +642,7 @@ vuint16m8_t test_vmul_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmul_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tu( @@ -651,7 +651,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_tu( @@ -660,7 +660,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmul_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_tu( @@ -669,7 +669,7 @@ vuint32m1_t test_vmul_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_tu( @@ -678,7 +678,7 @@ vuint32m1_t test_vmul_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2_tu(vuint32m2_t 
maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmul_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_tu( @@ -687,7 +687,7 @@ vuint32m2_t test_vmul_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m4_tu( @@ -696,7 +696,7 @@ vuint32m2_t test_vmul_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmul_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_tu( @@ -705,7 +705,7 @@ vuint32m4_t test_vmul_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_tu( @@ -714,7 +714,7 @@ vuint32m4_t test_vmul_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmul_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_tu( @@ -723,7 +723,7 @@ vuint32m8_t test_vmul_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8_tu(vuint32m8_t 
maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_tu( @@ -732,7 +732,7 @@ vuint32m8_t test_vmul_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmul_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_tu( @@ -741,7 +741,7 @@ vuint64m1_t test_vmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_tu( @@ -750,7 +750,7 @@ vuint64m1_t test_vmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmul_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_tu( @@ -759,7 +759,7 @@ vuint64m2_t test_vmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_tu( @@ -768,7 +768,7 @@ vuint64m2_t test_vmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vv_u64m4_tu(vuint64m4_t 
maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmul_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_tu( @@ -777,7 +777,7 @@ vuint64m4_t test_vmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_tu( @@ -786,7 +786,7 @@ vuint64m4_t test_vmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmul_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_tu( @@ -795,7 +795,7 @@ vuint64m8_t test_vmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_tum( @@ -804,7 +804,7 @@ vuint64m8_t test_vmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmul_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_tum( @@ -813,7 +813,7 @@ vint8mf8_t test_vmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_tum( @@ -822,7 +822,7 @@ vint8mf8_t test_vmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmul_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_tum( @@ -831,7 +831,7 @@ vint8mf4_t test_vmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_tum( @@ -840,7 +840,7 @@ vint8mf4_t test_vmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmul_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_tum( @@ -849,7 +849,7 @@ vint8mf2_t test_vmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m1_tum( @@ -858,7 +858,7 @@ 
vint8mf2_t test_vmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmul_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m1_tum( @@ -867,7 +867,7 @@ vint8m1_t test_vmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m2_tum( @@ -876,7 +876,7 @@ vint8m1_t test_vmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmul_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m2_tum( @@ -885,7 +885,7 @@ vint8m2_t test_vmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m4_tum( @@ -894,7 +894,7 @@ vint8m2_t test_vmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmul_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m4_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m4_tum( @@ -903,7 +903,7 @@ vint8m4_t test_vmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m8_tum( @@ -912,7 +912,7 @@ vint8m4_t test_vmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmul_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m8_tum( @@ -921,7 +921,7 @@ vint8m8_t test_vmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_tum( @@ -930,7 +930,7 @@ vint8m8_t test_vmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmul_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_tum( @@ -939,7 +939,7 @@ vint16mf4_t test_vmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t 
op2, size_t vl) { - return vmul_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_tum( @@ -948,7 +948,7 @@ vint16mf4_t test_vmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmul_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_tum( @@ -957,7 +957,7 @@ vint16mf2_t test_vmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m1_tum( @@ -966,7 +966,7 @@ vint16mf2_t test_vmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmul_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m1_tum( @@ -975,7 +975,7 @@ vint16m1_t test_vmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m2_tum( @@ -984,7 +984,7 @@ vint16m1_t test_vmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t 
maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmul_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m2_tum( @@ -993,7 +993,7 @@ vint16m2_t test_vmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m4_tum( @@ -1002,7 +1002,7 @@ vint16m2_t test_vmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmul_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m4_tum( @@ -1011,7 +1011,7 @@ vint16m4_t test_vmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m8_tum( @@ -1020,7 +1020,7 @@ vint16m4_t test_vmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmul_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m8_tum(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m8_tum( @@ -1029,7 +1029,7 @@ vint16m8_t test_vmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tum( @@ -1038,7 +1038,7 @@ vint16m8_t test_vmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmul_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tum( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m1_tum( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmul_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m1_tum( @@ -1065,7 +1065,7 @@ vint32m1_t test_vmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, 
vint32m1_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m2_tum( @@ -1074,7 +1074,7 @@ vint32m1_t test_vmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmul_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m2_tum( @@ -1083,7 +1083,7 @@ vint32m2_t test_vmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m4_tum( @@ -1092,7 +1092,7 @@ vint32m2_t test_vmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmul_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m4_tum( @@ -1101,7 +1101,7 @@ vint32m4_t test_vmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m8_tum( @@ -1110,7 +1110,7 @@ vint32m4_t test_vmul_vx_i32m4_tum(vbool8_t mask, 
vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmul_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m8_tum( @@ -1119,7 +1119,7 @@ vint32m8_t test_vmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m1_tum( @@ -1128,7 +1128,7 @@ vint32m8_t test_vmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmul_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m1_tum( @@ -1137,7 +1137,7 @@ vint64m1_t test_vmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m2_tum( @@ -1146,7 +1146,7 @@ vint64m1_t test_vmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmul_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m2_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m2_tum( @@ -1155,7 +1155,7 @@ vint64m2_t test_vmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m4_tum( @@ -1164,7 +1164,7 @@ vint64m2_t test_vmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmul_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m4_tum( @@ -1173,7 +1173,7 @@ vint64m4_t test_vmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m8_tum( @@ -1182,7 +1182,7 @@ vint64m4_t test_vmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmul_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m8_tum( @@ -1191,7 +1191,7 @@ vint64m8_t test_vmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, 
vint64m8_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_tum( @@ -1200,7 +1200,7 @@ vint64m8_t test_vmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmul_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_tum( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vmul_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_tum( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vmul_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmul_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_tum( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vmul_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_tum( @@ -1236,7 +1236,7 @@ vuint8mf4_t 
test_vmul_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmul_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_tum( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vmul_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_tum( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vmul_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmul_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m1_tum( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vmul_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_tum( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vmul_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmul_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmul_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_tum( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vmul_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_tum( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vmul_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmul_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_tum( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vmul_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m8_tum( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vmul_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmul_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_tum( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vmul_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8_tum(vbool1_t mask, vuint8m8_t 
maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_tum( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vmul_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmul_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_tum( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vmul_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_tum( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vmul_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmul_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_tum( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vmul_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m1_tum( @@ -1362,7 
+1362,7 @@ vuint16mf2_t test_vmul_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmul_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_tum( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vmul_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_tum( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vmul_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmul_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_tum( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vmul_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_tum( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vmul_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return 
vmul_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_tum( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vmul_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_tum( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vmul_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmul_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_tum( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vmul_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tum( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vmul_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmul_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tum( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, 
vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_tum( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmul_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_tum( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vmul_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_tum( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vmul_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmul_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_tum( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vmul_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m2_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m4_tum( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vmul_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmul_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_tum( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vmul_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_tum( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vmul_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmul_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_tum( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vmul_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_tum( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vmul_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1_tum(vbool64_t mask, 
vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmul_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_tum( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_tum( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmul_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_tum( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_tum( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmul_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_tum( @@ -1569,7 +1569,7 @@ 
vuint64m4_t test_vmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_tum( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmul_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_tum( @@ -1587,7 +1587,7 @@ vuint64m8_t test_vmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_tumu( @@ -1596,7 +1596,7 @@ vuint64m8_t test_vmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmul_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_tumu( @@ -1605,7 +1605,7 @@ vint8mf8_t test_vmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf8_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_tumu( @@ -1614,7 +1614,7 @@ vint8mf8_t test_vmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmul_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_tumu( @@ -1623,7 +1623,7 @@ vint8mf4_t test_vmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_tumu( @@ -1632,7 +1632,7 @@ vint8mf4_t test_vmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmul_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_tumu( @@ -1641,7 +1641,7 @@ vint8mf2_t test_vmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m1_tumu( @@ -1650,7 +1650,7 @@ vint8mf2_t test_vmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m1_t test_vmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmul_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m1_tumu( @@ -1659,7 +1659,7 @@ vint8m1_t test_vmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m2_tumu( @@ -1668,7 +1668,7 @@ vint8m1_t test_vmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmul_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m2_tumu( @@ -1677,7 +1677,7 @@ vint8m2_t test_vmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m4_tumu( @@ -1686,7 +1686,7 @@ vint8m2_t test_vmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmul_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m4_tumu( @@ 
-1695,7 +1695,7 @@ vint8m4_t test_vmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m8_tumu( @@ -1704,7 +1704,7 @@ vint8m4_t test_vmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmul_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m8_tumu( @@ -1713,7 +1713,7 @@ vint8m8_t test_vmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_tumu( @@ -1722,7 +1722,7 @@ vint8m8_t test_vmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmul_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_tumu( @@ -1731,7 +1731,7 @@ vint16mf4_t test_vmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return 
vmul_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_tumu( @@ -1740,7 +1740,7 @@ vint16mf4_t test_vmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmul_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_tumu( @@ -1749,7 +1749,7 @@ vint16mf2_t test_vmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m1_tumu( @@ -1758,7 +1758,7 @@ vint16mf2_t test_vmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmul_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m1_tumu( @@ -1767,7 +1767,7 @@ vint16m1_t test_vmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m2_tumu( @@ -1776,7 +1776,7 @@ vint16m1_t test_vmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t 
maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmul_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m2_tumu( @@ -1785,7 +1785,7 @@ vint16m2_t test_vmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m4_tumu( @@ -1794,7 +1794,7 @@ vint16m2_t test_vmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmul_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m4_tumu( @@ -1803,7 +1803,7 @@ vint16m4_t test_vmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m8_tumu( @@ -1812,7 +1812,7 @@ vint16m4_t test_vmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmul_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m8_tumu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m8_tumu( @@ -1821,7 +1821,7 @@ vint16m8_t test_vmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_tumu( @@ -1830,7 +1830,7 @@ vint16m8_t test_vmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmul_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_tumu( @@ -1839,7 +1839,7 @@ vint32mf2_t test_vmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m1_tumu( @@ -1848,7 +1848,7 @@ vint32mf2_t test_vmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m1_tumu( @@ -1857,7 +1857,7 @@ vint32m1_t test_vmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m2_tumu( @@ -1866,7 +1866,7 @@ vint32m1_t test_vmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmul_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m2_tumu( @@ -1875,7 +1875,7 @@ vint32m2_t test_vmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m4_tumu( @@ -1884,7 +1884,7 @@ vint32m2_t test_vmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmul_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m4_tumu( @@ -1893,7 +1893,7 @@ vint32m4_t test_vmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmul_vv_i32m8_tumu( @@ -1902,7 +1902,7 @@ vint32m4_t test_vmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmul_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m8_tumu( @@ -1911,7 +1911,7 @@ vint32m8_t test_vmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m1_tumu( @@ -1920,7 +1920,7 @@ vint32m8_t test_vmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmul_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m1_tumu( @@ -1929,7 +1929,7 @@ vint64m1_t test_vmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m2_tumu( @@ -1938,7 +1938,7 @@ vint64m1_t test_vmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, 
size_t vl) { - return vmul_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m2_tumu( @@ -1947,7 +1947,7 @@ vint64m2_t test_vmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m4_tumu( @@ -1956,7 +1956,7 @@ vint64m2_t test_vmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmul_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m4_tumu( @@ -1965,7 +1965,7 @@ vint64m4_t test_vmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m8_tumu( @@ -1974,7 +1974,7 @@ vint64m4_t test_vmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmul_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m8_tumu( @@ -1983,7 +1983,7 @@ vint64m8_t test_vmul_vv_i64m8_tumu(vbool8_t mask, 
vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_tumu( @@ -1992,7 +1992,7 @@ vint64m8_t test_vmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmul_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_tumu( @@ -2001,7 +2001,7 @@ vuint8mf8_t test_vmul_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_tumu( @@ -2010,7 +2010,7 @@ vuint8mf8_t test_vmul_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmul_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_tumu( @@ -2019,7 +2019,7 @@ vuint8mf4_t test_vmul_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmul_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_tumu( @@ -2028,7 +2028,7 @@ vuint8mf4_t test_vmul_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmul_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_tumu( @@ -2037,7 +2037,7 @@ vuint8mf2_t test_vmul_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_tumu( @@ -2046,7 +2046,7 @@ vuint8mf2_t test_vmul_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmul_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m1_tumu( @@ -2055,7 +2055,7 @@ vuint8m1_t test_vmul_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_tumu( @@ -2064,7 +2064,7 @@ vuint8m1_t test_vmul_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vmul_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmul_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_tumu( @@ -2073,7 +2073,7 @@ vuint8m2_t test_vmul_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_tumu( @@ -2082,7 +2082,7 @@ vuint8m2_t test_vmul_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmul_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_tumu( @@ -2091,7 +2091,7 @@ vuint8m4_t test_vmul_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m8_tumu( @@ -2100,7 +2100,7 @@ vuint8m4_t test_vmul_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmul_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_tumu( @@ -2109,7 
+2109,7 @@ vuint8m8_t test_vmul_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_tumu( @@ -2118,7 +2118,7 @@ vuint8m8_t test_vmul_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmul_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_tumu( @@ -2127,7 +2127,7 @@ vuint16mf4_t test_vmul_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_tumu( @@ -2136,7 +2136,7 @@ vuint16mf4_t test_vmul_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmul_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_tumu( @@ -2145,7 +2145,7 @@ vuint16mf2_t test_vmul_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, 
size_t vl) { - return vmul_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m1_tumu( @@ -2154,7 +2154,7 @@ vuint16mf2_t test_vmul_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmul_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_tumu( @@ -2163,7 +2163,7 @@ vuint16m1_t test_vmul_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_tumu( @@ -2172,7 +2172,7 @@ vuint16m1_t test_vmul_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmul_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_tumu( @@ -2181,7 +2181,7 @@ vuint16m2_t test_vmul_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_tumu( @@ -2190,7 +2190,7 @@ vuint16m2_t 
test_vmul_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmul_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_tumu( @@ -2199,7 +2199,7 @@ vuint16m4_t test_vmul_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_tumu( @@ -2208,7 +2208,7 @@ vuint16m4_t test_vmul_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmul_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_tumu( @@ -2217,7 +2217,7 @@ vuint16m8_t test_vmul_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_tumu( @@ -2226,7 +2226,7 @@ vuint16m8_t test_vmul_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return 
vmul_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_tumu( @@ -2235,7 +2235,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_tumu( @@ -2244,7 +2244,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmul_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_tumu( @@ -2253,7 +2253,7 @@ vuint32m1_t test_vmul_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_tumu( @@ -2262,7 +2262,7 @@ vuint32m1_t test_vmul_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmul_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_tumu( @@ -2271,7 +2271,7 @@ vuint32m2_t test_vmul_vv_u32m2_tumu(vbool16_t mask, 
vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m4_tumu( @@ -2280,7 +2280,7 @@ vuint32m2_t test_vmul_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmul_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_tumu( @@ -2289,7 +2289,7 @@ vuint32m4_t test_vmul_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_tumu( @@ -2298,7 +2298,7 @@ vuint32m4_t test_vmul_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmul_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_tumu( @@ -2307,7 +2307,7 @@ vuint32m8_t test_vmul_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmul_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_tumu( @@ -2316,7 +2316,7 @@ vuint32m8_t test_vmul_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmul_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_tumu( @@ -2325,7 +2325,7 @@ vuint64m1_t test_vmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_tumu( @@ -2334,7 +2334,7 @@ vuint64m1_t test_vmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmul_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_tumu( @@ -2343,7 +2343,7 @@ vuint64m2_t test_vmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_tumu( @@ -2352,7 +2352,7 @@ vuint64m2_t test_vmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m4_t test_vmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmul_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_tumu( @@ -2361,7 +2361,7 @@ vuint64m4_t test_vmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_tumu( @@ -2370,7 +2370,7 @@ vuint64m4_t test_vmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmul_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_tumu( @@ -2379,7 +2379,7 @@ vuint64m8_t test_vmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_mu( @@ -2388,7 +2388,7 @@ vuint64m8_t test_vmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmul_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_mu( @@ -2397,7 +2397,7 @@ vint8mf8_t test_vmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_mu( @@ -2406,7 +2406,7 @@ vint8mf8_t test_vmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmul_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_mu( @@ -2415,7 +2415,7 @@ vint8mf4_t test_vmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_mu( @@ -2424,7 +2424,7 @@ vint8mf4_t test_vmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmul_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_mu( @@ -2433,7 +2433,7 @@ vint8mf2_t test_vmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return 
vmul_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m1_mu( @@ -2442,7 +2442,7 @@ vint8mf2_t test_vmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmul_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m1_mu( @@ -2451,7 +2451,7 @@ vint8m1_t test_vmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m2_mu( @@ -2460,7 +2460,7 @@ vint8m1_t test_vmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmul_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m2_mu( @@ -2469,7 +2469,7 @@ vint8m2_t test_vmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m4_mu( @@ -2478,7 +2478,7 @@ vint8m2_t test_vmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t 
test_vmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmul_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m4_mu( @@ -2487,7 +2487,7 @@ vint8m4_t test_vmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i8m8_mu( @@ -2496,7 +2496,7 @@ vint8m4_t test_vmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmul_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i8m8_mu( @@ -2505,7 +2505,7 @@ vint8m8_t test_vmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmul_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_mu( @@ -2514,7 +2514,7 @@ vint8m8_t test_vmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmul_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_mu( @@ -2523,7 +2523,7 @@ vint16mf4_t 
test_vmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_mu( @@ -2532,7 +2532,7 @@ vint16mf4_t test_vmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmul_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_mu( @@ -2541,7 +2541,7 @@ vint16mf2_t test_vmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m1_mu( @@ -2550,7 +2550,7 @@ vint16mf2_t test_vmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmul_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m1_mu( @@ -2559,7 +2559,7 @@ vint16m1_t test_vmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vmul_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m2_mu( @@ -2568,7 +2568,7 @@ vint16m1_t test_vmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmul_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m2_mu( @@ -2577,7 +2577,7 @@ vint16m2_t test_vmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m4_mu( @@ -2586,7 +2586,7 @@ vint16m2_t test_vmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmul_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m4_mu( @@ -2595,7 +2595,7 @@ vint16m4_t test_vmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i16m8_mu( @@ -2604,7 +2604,7 @@ vint16m4_t test_vmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vv_i16m8_mu(vbool2_t mask, 
vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmul_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i16m8_mu( @@ -2613,7 +2613,7 @@ vint16m8_t test_vmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmul_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_mu( @@ -2622,7 +2622,7 @@ vint16m8_t test_vmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmul_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_mu( @@ -2631,7 +2631,7 @@ vint32mf2_t test_vmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m1_mu( @@ -2640,7 +2640,7 @@ vint32mf2_t test_vmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmul_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m1_mu( @@ -2649,7 +2649,7 @@ vint32m1_t 
test_vmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m2_mu( @@ -2658,7 +2658,7 @@ vint32m1_t test_vmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmul_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m2_mu( @@ -2667,7 +2667,7 @@ vint32m2_t test_vmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m4_mu( @@ -2676,7 +2676,7 @@ vint32m2_t test_vmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmul_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m4_mu( @@ -2685,7 +2685,7 @@ vint32m4_t test_vmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmul_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m8_mu( @@ -2694,7 +2694,7 @@ vint32m4_t test_vmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmul_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m8_mu( @@ -2703,7 +2703,7 @@ vint32m8_t test_vmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmul_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m1_mu( @@ -2712,7 +2712,7 @@ vint32m8_t test_vmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmul_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m1_mu( @@ -2721,7 +2721,7 @@ vint64m1_t test_vmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m2_mu( @@ -2730,7 +2730,7 @@ vint64m1_t test_vmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t 
maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmul_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m2_mu( @@ -2739,7 +2739,7 @@ vint64m2_t test_vmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m4_mu( @@ -2748,7 +2748,7 @@ vint64m2_t test_vmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmul_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m4_mu( @@ -2757,7 +2757,7 @@ vint64m4_t test_vmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m8_mu( @@ -2766,7 +2766,7 @@ vint64m4_t test_vmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmul_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m8_mu( @@ -2775,7 +2775,7 @@ vint64m8_t test_vmul_vv_i64m8_mu(vbool8_t mask, 
vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmul_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_mu( @@ -2784,7 +2784,7 @@ vint64m8_t test_vmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmul_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_mu( @@ -2793,7 +2793,7 @@ vuint8mf8_t test_vmul_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_mu( @@ -2802,7 +2802,7 @@ vuint8mf8_t test_vmul_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmul_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_mu( @@ -2811,7 +2811,7 @@ vuint8mf4_t test_vmul_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8mf4_mu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_mu( @@ -2820,7 +2820,7 @@ vuint8mf4_t test_vmul_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmul_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_mu( @@ -2829,7 +2829,7 @@ vuint8mf2_t test_vmul_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_mu( @@ -2838,7 +2838,7 @@ vuint8mf2_t test_vmul_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmul_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m1_mu( @@ -2847,7 +2847,7 @@ vuint8m1_t test_vmul_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_mu( @@ -2856,7 +2856,7 @@ vuint8m1_t test_vmul_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t 
op2, size_t vl) { - return vmul_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_mu( @@ -2865,7 +2865,7 @@ vuint8m2_t test_vmul_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_mu( @@ -2874,7 +2874,7 @@ vuint8m2_t test_vmul_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmul_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_mu( @@ -2883,7 +2883,7 @@ vuint8m4_t test_vmul_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m8_mu( @@ -2892,7 +2892,7 @@ vuint8m4_t test_vmul_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmul_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_mu( @@ -2901,7 +2901,7 @@ vuint8m8_t test_vmul_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m8_t test_vmul_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmul_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_mu( @@ -2910,7 +2910,7 @@ vuint8m8_t test_vmul_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmul_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_mu( @@ -2919,7 +2919,7 @@ vuint16mf4_t test_vmul_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_mu( @@ -2928,7 +2928,7 @@ vuint16mf4_t test_vmul_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmul_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_mu( @@ -2937,7 +2937,7 @@ vuint16mf2_t test_vmul_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmul_vv_u16m1_mu( @@ -2946,7 +2946,7 @@ vuint16mf2_t test_vmul_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmul_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_mu( @@ -2955,7 +2955,7 @@ vuint16m1_t test_vmul_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_mu( @@ -2964,7 +2964,7 @@ vuint16m1_t test_vmul_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmul_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_mu( @@ -2973,7 +2973,7 @@ vuint16m2_t test_vmul_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_mu( @@ -2982,7 +2982,7 @@ vuint16m2_t test_vmul_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - 
return vmul_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_mu( @@ -2991,7 +2991,7 @@ vuint16m4_t test_vmul_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_mu( @@ -3000,7 +3000,7 @@ vuint16m4_t test_vmul_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmul_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_mu( @@ -3009,7 +3009,7 @@ vuint16m8_t test_vmul_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmul_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_mu( @@ -3018,7 +3018,7 @@ vuint16m8_t test_vmul_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmul_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_mu( @@ -3027,7 +3027,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_mu( @@ -3036,7 +3036,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmul_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_mu( @@ -3045,7 +3045,7 @@ vuint32m1_t test_vmul_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_mu( @@ -3054,7 +3054,7 @@ vuint32m1_t test_vmul_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmul_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_mu( @@ -3063,7 +3063,7 @@ vuint32m2_t test_vmul_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmul_vv_u32m4_mu( @@ -3072,7 +3072,7 @@ vuint32m2_t test_vmul_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmul_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_mu( @@ -3081,7 +3081,7 @@ vuint32m4_t test_vmul_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_mu( @@ -3090,7 +3090,7 @@ vuint32m4_t test_vmul_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmul_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_mu( @@ -3099,7 +3099,7 @@ vuint32m8_t test_vmul_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmul_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_mu( @@ -3108,7 +3108,7 @@ vuint32m8_t test_vmul_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, 
size_t vl) { - return vmul_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_mu( @@ -3117,7 +3117,7 @@ vuint64m1_t test_vmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_mu( @@ -3126,7 +3126,7 @@ vuint64m1_t test_vmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmul_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_mu( @@ -3135,7 +3135,7 @@ vuint64m2_t test_vmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_mu( @@ -3144,7 +3144,7 @@ vuint64m2_t test_vmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmul_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_mu( @@ -3153,7 +3153,7 @@ vuint64m4_t test_vmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, 
vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_mu( @@ -3162,7 +3162,7 @@ vuint64m4_t test_vmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmul_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_mu( @@ -3171,6 +3171,6 @@ vuint64m8_t test_vmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmul_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmul_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmulh.c index 4b81aa3d5cfd..621d129ea80c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmulh.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmulh.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmulh_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vmulh_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf8_t test_vmulh_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vmulh_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmulh_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vmulh_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vmulh_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmulh_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vmulh_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vmulh_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vmulh_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmulh_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vmulh_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vmulh_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmulh_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vmulh_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vmulh_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmulh_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vmulh_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t 
op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vmulh_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmulh_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vmulh_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vmulh_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmulh_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vmulh_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vmulh_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, 
vint16mf2_t op2, size_t vl) { - return vmulh_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vmulh_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vmulh_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmulh_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vmulh_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vmulh_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmulh_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vmulh_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t 
op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vmulh_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmulh_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vmulh_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vmulh_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmulh_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vmulh_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vmulh_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, 
vint32mf2_t op2, size_t vl) { - return vmulh_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmulh_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vmulh_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vmulh_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmulh_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vmulh_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t 
op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vmulh_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmulh_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vmulh_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vmulh_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmulh_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vmulh_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vmulh_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, 
vint64m1_t op2, size_t vl) { - return vmulh_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vmulh_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vmulh_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmulh_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vmulh_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vmulh_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmulh_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vmulh_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t 
op2, size_t vl) { - return vmulh_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vmulh_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vmulh_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_tum( @@ -408,7 +408,7 @@ vint64m8_t test_vmulh_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmulh_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vmulh_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_tum( @@ -426,7 +426,7 @@ vint8mf8_t test_vmulh_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t 
test_vmulh_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmulh_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_tum( @@ -435,7 +435,7 @@ vint8mf4_t test_vmulh_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_tum( @@ -444,7 +444,7 @@ vint8mf4_t test_vmulh_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmulh_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_tum( @@ -453,7 +453,7 @@ vint8mf2_t test_vmulh_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_tum( @@ -462,7 +462,7 @@ vint8mf2_t test_vmulh_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmulh_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_tum( @@ 
-471,7 +471,7 @@ vint8m1_t test_vmulh_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_tum( @@ -480,7 +480,7 @@ vint8m1_t test_vmulh_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmulh_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_tum( @@ -489,7 +489,7 @@ vint8m2_t test_vmulh_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_tum( @@ -498,7 +498,7 @@ vint8m2_t test_vmulh_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmulh_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_tum( @@ -507,7 +507,7 @@ vint8m4_t test_vmulh_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmulh_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_tum( @@ -516,7 +516,7 @@ vint8m4_t test_vmulh_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmulh_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_tum( @@ -525,7 +525,7 @@ vint8m8_t test_vmulh_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_tum( @@ -534,7 +534,7 @@ vint8m8_t test_vmulh_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmulh_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_tum( @@ -543,7 +543,7 @@ vint16mf4_t test_vmulh_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_tum( @@ -552,7 +552,7 @@ vint16mf4_t test_vmulh_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vmulh_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmulh_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_tum( @@ -561,7 +561,7 @@ vint16mf2_t test_vmulh_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_tum( @@ -570,7 +570,7 @@ vint16mf2_t test_vmulh_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmulh_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_tum( @@ -579,7 +579,7 @@ vint16m1_t test_vmulh_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_tum( @@ -588,7 +588,7 @@ vint16m1_t test_vmulh_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmulh_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmulh_vx_i16m2_tum( @@ -597,7 +597,7 @@ vint16m2_t test_vmulh_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_tum( @@ -606,7 +606,7 @@ vint16m2_t test_vmulh_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmulh_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_tum( @@ -615,7 +615,7 @@ vint16m4_t test_vmulh_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_tum( @@ -624,7 +624,7 @@ vint16m4_t test_vmulh_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmulh_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_tum( @@ -633,7 +633,7 @@ vint16m8_t test_vmulh_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - 
return vmulh_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tum( @@ -642,7 +642,7 @@ vint16m8_t test_vmulh_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmulh_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tum( @@ -651,7 +651,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_tum( @@ -660,7 +660,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmulh_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_tum( @@ -669,7 +669,7 @@ vint32m1_t test_vmulh_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_tum( @@ -678,7 +678,7 @@ vint32m1_t test_vmulh_vx_i32m1_tum(vbool32_t mask, vint32m1_t 
maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmulh_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_tum( @@ -687,7 +687,7 @@ vint32m2_t test_vmulh_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_tum( @@ -696,7 +696,7 @@ vint32m2_t test_vmulh_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmulh_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_tum( @@ -705,7 +705,7 @@ vint32m4_t test_vmulh_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_tum( @@ -714,7 +714,7 @@ vint32m4_t test_vmulh_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmulh_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m8_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_tum( @@ -723,7 +723,7 @@ vint32m8_t test_vmulh_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_tum( @@ -732,7 +732,7 @@ vint32m8_t test_vmulh_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmulh_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_tum( @@ -741,7 +741,7 @@ vint64m1_t test_vmulh_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_tum( @@ -750,7 +750,7 @@ vint64m1_t test_vmulh_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmulh_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_tum( @@ -759,7 +759,7 @@ vint64m2_t test_vmulh_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2_tum(vbool32_t mask, vint64m2_t 
maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_tum( @@ -768,7 +768,7 @@ vint64m2_t test_vmulh_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmulh_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_tum( @@ -777,7 +777,7 @@ vint64m4_t test_vmulh_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_tum( @@ -786,7 +786,7 @@ vint64m4_t test_vmulh_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_tum( @@ -795,7 +795,7 @@ vint64m8_t test_vmulh_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_tumu( @@ -804,7 +804,7 @@ vint64m8_t 
test_vmulh_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmulh_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vmulh_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_tumu( @@ -822,7 +822,7 @@ vint8mf8_t test_vmulh_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmulh_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_tumu( @@ -831,7 +831,7 @@ vint8mf4_t test_vmulh_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_tumu( @@ -840,7 +840,7 @@ vint8mf4_t test_vmulh_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmulh_vv_i8mf2_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_tumu( @@ -849,7 +849,7 @@ vint8mf2_t test_vmulh_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_tumu( @@ -858,7 +858,7 @@ vint8mf2_t test_vmulh_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmulh_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_tumu( @@ -867,7 +867,7 @@ vint8m1_t test_vmulh_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_tumu( @@ -876,7 +876,7 @@ vint8m1_t test_vmulh_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmulh_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_tumu( @@ -885,7 +885,7 @@ vint8m2_t test_vmulh_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vmulh_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_tumu( @@ -894,7 +894,7 @@ vint8m2_t test_vmulh_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmulh_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_tumu( @@ -903,7 +903,7 @@ vint8m4_t test_vmulh_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_tumu( @@ -912,7 +912,7 @@ vint8m4_t test_vmulh_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmulh_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_tumu( @@ -921,7 +921,7 @@ vint8m8_t test_vmulh_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_tumu( @@ -930,7 +930,7 @@ 
vint8m8_t test_vmulh_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmulh_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_tumu( @@ -939,7 +939,7 @@ vint16mf4_t test_vmulh_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_tumu( @@ -948,7 +948,7 @@ vint16mf4_t test_vmulh_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmulh_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_tumu( @@ -957,7 +957,7 @@ vint16mf2_t test_vmulh_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_tumu( @@ -966,7 +966,7 @@ vint16mf2_t test_vmulh_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) 
{ - return vmulh_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_tumu( @@ -975,7 +975,7 @@ vint16m1_t test_vmulh_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_tumu( @@ -984,7 +984,7 @@ vint16m1_t test_vmulh_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmulh_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_tumu( @@ -993,7 +993,7 @@ vint16m2_t test_vmulh_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_tumu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vmulh_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmulh_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_tumu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vmulh_vv_i16m4_tumu(vbool4_t mask, 
vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_tumu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vmulh_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmulh_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_tumu( @@ -1029,7 +1029,7 @@ vint16m8_t test_vmulh_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_tumu( @@ -1038,7 +1038,7 @@ vint16m8_t test_vmulh_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmulh_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_tumu( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); 
+ return __riscv_vmulh_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_tumu( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmulh_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_tumu( @@ -1065,7 +1065,7 @@ vint32m1_t test_vmulh_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_tumu( @@ -1074,7 +1074,7 @@ vint32m1_t test_vmulh_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmulh_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_tumu( @@ -1083,7 +1083,7 @@ vint32m2_t test_vmulh_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_tumu( @@ -1092,7 +1092,7 @@ vint32m2_t test_vmulh_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m4_t test_vmulh_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmulh_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_tumu( @@ -1101,7 +1101,7 @@ vint32m4_t test_vmulh_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_tumu( @@ -1110,7 +1110,7 @@ vint32m4_t test_vmulh_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmulh_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_tumu( @@ -1119,7 +1119,7 @@ vint32m8_t test_vmulh_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_tumu( @@ -1128,7 +1128,7 @@ vint32m8_t test_vmulh_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmulh_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m1_tumu(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_tumu( @@ -1137,7 +1137,7 @@ vint64m1_t test_vmulh_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_tumu( @@ -1146,7 +1146,7 @@ vint64m1_t test_vmulh_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmulh_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_tumu( @@ -1155,7 +1155,7 @@ vint64m2_t test_vmulh_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_tumu( @@ -1164,7 +1164,7 @@ vint64m2_t test_vmulh_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmulh_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_tumu( @@ -1173,7 +1173,7 @@ vint64m4_t test_vmulh_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4_tumu(vbool16_t mask, 
vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_tumu( @@ -1182,7 +1182,7 @@ vint64m4_t test_vmulh_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_tumu( @@ -1191,7 +1191,7 @@ vint64m8_t test_vmulh_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_mu( @@ -1200,7 +1200,7 @@ vint64m8_t test_vmulh_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmulh_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vmulh_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_mu( @@ -1218,7 +1218,7 @@ 
vint8mf8_t test_vmulh_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmulh_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_mu( @@ -1227,7 +1227,7 @@ vint8mf4_t test_vmulh_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_mu( @@ -1236,7 +1236,7 @@ vint8mf4_t test_vmulh_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmulh_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_mu( @@ -1245,7 +1245,7 @@ vint8mf2_t test_vmulh_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_mu( @@ -1254,7 +1254,7 @@ vint8mf2_t test_vmulh_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmulh_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vmulh_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_mu( @@ -1263,7 +1263,7 @@ vint8m1_t test_vmulh_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_mu( @@ -1272,7 +1272,7 @@ vint8m1_t test_vmulh_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmulh_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_mu( @@ -1281,7 +1281,7 @@ vint8m2_t test_vmulh_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_mu( @@ -1290,7 +1290,7 @@ vint8m2_t test_vmulh_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmulh_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_mu( @@ -1299,7 +1299,7 @@ vint8m4_t test_vmulh_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, 
vint8m4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_mu( @@ -1308,7 +1308,7 @@ vint8m4_t test_vmulh_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmulh_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_mu( @@ -1317,7 +1317,7 @@ vint8m8_t test_vmulh_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_mu( @@ -1326,7 +1326,7 @@ vint8m8_t test_vmulh_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmulh_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_mu( @@ -1335,7 +1335,7 @@ vint16mf4_t test_vmulh_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_mu( @@ -1344,7 +1344,7 @@ vint16mf4_t test_vmulh_vx_i16mf4_mu(vbool64_t mask, 
vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmulh_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_mu( @@ -1353,7 +1353,7 @@ vint16mf2_t test_vmulh_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_mu( @@ -1362,7 +1362,7 @@ vint16mf2_t test_vmulh_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmulh_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_mu( @@ -1371,7 +1371,7 @@ vint16m1_t test_vmulh_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_mu( @@ -1380,7 +1380,7 @@ vint16m1_t test_vmulh_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmulh_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmulh_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_mu( @@ -1389,7 +1389,7 @@ vint16m2_t test_vmulh_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_mu( @@ -1398,7 +1398,7 @@ vint16m2_t test_vmulh_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmulh_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_mu( @@ -1407,7 +1407,7 @@ vint16m4_t test_vmulh_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_mu( @@ -1416,7 +1416,7 @@ vint16m4_t test_vmulh_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmulh_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_mu( @@ -1425,7 +1425,7 @@ vint16m8_t test_vmulh_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8_mu(vbool2_t 
mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_mu( @@ -1434,7 +1434,7 @@ vint16m8_t test_vmulh_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmulh_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_mu( @@ -1443,7 +1443,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_mu( @@ -1452,7 +1452,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmulh_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_mu( @@ -1461,7 +1461,7 @@ vint32m1_t test_vmulh_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_mu( @@ -1470,7 +1470,7 @@ 
vint32m1_t test_vmulh_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmulh_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_mu( @@ -1479,7 +1479,7 @@ vint32m2_t test_vmulh_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_mu( @@ -1488,7 +1488,7 @@ vint32m2_t test_vmulh_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmulh_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_mu( @@ -1497,7 +1497,7 @@ vint32m4_t test_vmulh_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_mu( @@ -1506,7 +1506,7 @@ vint32m4_t test_vmulh_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmulh_vv_i32m8_mu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vmulh_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_mu( @@ -1515,7 +1515,7 @@ vint32m8_t test_vmulh_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_mu( @@ -1524,7 +1524,7 @@ vint32m8_t test_vmulh_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmulh_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_mu( @@ -1533,7 +1533,7 @@ vint64m1_t test_vmulh_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_mu( @@ -1542,7 +1542,7 @@ vint64m1_t test_vmulh_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmulh_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_mu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vmulh_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vmulh_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_mu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vmulh_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmulh_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_mu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vmulh_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_mu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vmulh_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_mu( @@ -1587,6 +1587,6 @@ vint64m8_t test_vmulh_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulh_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmulhsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmulhsu.c index b8fac829fc4b..f189df0ae665 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmulhsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmulhsu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t 
test_vmulhsu_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vmulhsu_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vmulhsu_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_tu( @@ -93,7 +93,7 @@ 
vint8m2_t test_vmulhsu_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vmulhsu_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vmulhsu_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vmulhsu_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhsu_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vmulhsu_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_tu( @@ -138,7 +138,7 @@ 
vint8m8_t test_vmulhsu_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhsu_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m1_tu(maskedoff, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vmulhsu_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vmulhsu_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vmulhsu_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vmulhsu_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vmulhsu_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m4_tu(maskedoff, op1, op2, vl); + return 
__riscv_vmulhsu_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vmulhsu_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhsu_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vmulhsu_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vmulhsu_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhsu_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return 
vmulhsu_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vmulhsu_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vmulhsu_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhsu_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vmulhsu_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vmulhsu_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhsu_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vmulhsu_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, 
uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vmulhsu_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhsu_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vmulhsu_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vmulhsu_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhsu_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vmulhsu_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vmulhsu_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vmulhsu_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhsu_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vmulhsu_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vmulhsu_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhsu_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vmulhsu_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vmulhsu_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhsu_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vmulhsu_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint6 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_tum( @@ -408,7 +408,7 @@ vint64m8_t test_vmulhsu_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_tum( @@ -426,7 +426,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_tum( @@ -435,7 +435,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf4_tum(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_tum( @@ -444,7 +444,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_tum( @@ -453,7 +453,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_tum( @@ -462,7 +462,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vmulhsu_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_tum( @@ -480,7 +480,7 @@ vint8m1_t test_vmulhsu_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2_tum(vbool4_t mask, vint8m2_t 
maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_tum( @@ -489,7 +489,7 @@ vint8m2_t test_vmulhsu_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_tum( @@ -498,7 +498,7 @@ vint8m2_t test_vmulhsu_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_tum( @@ -507,7 +507,7 @@ vint8m4_t test_vmulhsu_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_tum( @@ -516,7 +516,7 @@ vint8m4_t test_vmulhsu_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhsu_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_tum( @@ -525,7 +525,7 @@ vint8m8_t 
test_vmulhsu_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_tum( @@ -534,7 +534,7 @@ vint8m8_t test_vmulhsu_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_tum( @@ -543,7 +543,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_tum( @@ -552,7 +552,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_tum( @@ -561,7 +561,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t 
vl) { - return vmulhsu_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_tum( @@ -570,7 +570,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhsu_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_tum( @@ -579,7 +579,7 @@ vint16m1_t test_vmulhsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_tum( @@ -588,7 +588,7 @@ vint16m1_t test_vmulhsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_tum( @@ -597,7 +597,7 @@ vint16m2_t test_vmulhsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_tum( @@ -606,7 +606,7 @@ vint16m2_t 
test_vmulhsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_tum( @@ -615,7 +615,7 @@ vint16m4_t test_vmulhsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_tum( @@ -624,7 +624,7 @@ vint16m4_t test_vmulhsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhsu_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_tum( @@ -633,7 +633,7 @@ vint16m8_t test_vmulhsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tum( @@ -642,7 +642,7 @@ vint16m8_t test_vmulhsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return 
vmulhsu_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tum( @@ -651,7 +651,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_tum( @@ -660,7 +660,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhsu_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_tum( @@ -669,7 +669,7 @@ vint32m1_t test_vmulhsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_tum( @@ -678,7 +678,7 @@ vint32m1_t test_vmulhsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhsu_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_tum( @@ -687,7 +687,7 @@ vint32m2_t 
test_vmulhsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_tum( @@ -696,7 +696,7 @@ vint32m2_t test_vmulhsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhsu_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_tum( @@ -705,7 +705,7 @@ vint32m4_t test_vmulhsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_tum( @@ -714,7 +714,7 @@ vint32m4_t test_vmulhsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhsu_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_tum( @@ -723,7 +723,7 @@ vint32m8_t test_vmulhsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return 
vmulhsu_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_tum( @@ -732,7 +732,7 @@ vint32m8_t test_vmulhsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhsu_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_tum( @@ -741,7 +741,7 @@ vint64m1_t test_vmulhsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_tum( @@ -750,7 +750,7 @@ vint64m1_t test_vmulhsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhsu_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_tum( @@ -759,7 +759,7 @@ vint64m2_t test_vmulhsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_tum( @@ -768,7 +768,7 @@ vint64m2_t test_vmulhsu_vx_i64m2_tum(vbool32_t 
mask, vint64m2_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhsu_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_tum( @@ -777,7 +777,7 @@ vint64m4_t test_vmulhsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_tum( @@ -786,7 +786,7 @@ vint64m4_t test_vmulhsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhsu_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_tum( @@ -795,7 +795,7 @@ vint64m8_t test_vmulhsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_tumu( @@ -804,7 +804,7 @@ vint64m8_t test_vmulhsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu_vv_i8mf8_tumu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_tumu( @@ -822,7 +822,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_tumu( @@ -831,7 +831,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_tumu( @@ -840,7 +840,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_tumu( @@ -849,7 +849,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t 
maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_tumu( @@ -858,7 +858,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_tumu( @@ -867,7 +867,7 @@ vint8m1_t test_vmulhsu_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_tumu( @@ -876,7 +876,7 @@ vint8m1_t test_vmulhsu_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_tumu( @@ -885,7 +885,7 @@ vint8m2_t test_vmulhsu_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmulhsu_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_tumu( @@ -894,7 +894,7 @@ vint8m2_t test_vmulhsu_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_tumu( @@ -903,7 +903,7 @@ vint8m4_t test_vmulhsu_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_tumu( @@ -912,7 +912,7 @@ vint8m4_t test_vmulhsu_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhsu_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_tumu( @@ -921,7 +921,7 @@ vint8m8_t test_vmulhsu_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_tumu( @@ -930,7 +930,7 @@ vint8m8_t test_vmulhsu_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf4_t test_vmulhsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_tumu( @@ -939,7 +939,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_tumu( @@ -948,7 +948,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_tumu( @@ -957,7 +957,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_tumu( @@ -966,7 +966,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhsu_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmulhsu_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_tumu( @@ -975,7 +975,7 @@ vint16m1_t test_vmulhsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_tumu( @@ -984,7 +984,7 @@ vint16m1_t test_vmulhsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_tumu( @@ -993,7 +993,7 @@ vint16m2_t test_vmulhsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_tumu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vmulhsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_tumu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vmulhsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint1 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_tumu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vmulhsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhsu_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_tumu( @@ -1029,7 +1029,7 @@ vint16m8_t test_vmulhsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_tumu( @@ -1038,7 +1038,7 @@ vint16m8_t test_vmulhsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhsu_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_tumu( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32mf2_tumu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vmulhsu_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_tumu( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhsu_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_tumu( @@ -1065,7 +1065,7 @@ vint32m1_t test_vmulhsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_tumu( @@ -1074,7 +1074,7 @@ vint32m1_t test_vmulhsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhsu_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_tumu( @@ -1083,7 +1083,7 @@ vint32m2_t test_vmulhsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_tumu( @@ -1092,7 +1092,7 @@ vint32m2_t test_vmulhsu_vx_i32m2_tumu(vbool16_t mask, 
vint32m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhsu_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_tumu( @@ -1101,7 +1101,7 @@ vint32m4_t test_vmulhsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_tumu( @@ -1110,7 +1110,7 @@ vint32m4_t test_vmulhsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhsu_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_tumu( @@ -1119,7 +1119,7 @@ vint32m8_t test_vmulhsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_tumu( @@ -1128,7 +1128,7 @@ vint32m8_t test_vmulhsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhsu_vv_i64m1_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_tumu( @@ -1137,7 +1137,7 @@ vint64m1_t test_vmulhsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_tumu( @@ -1146,7 +1146,7 @@ vint64m1_t test_vmulhsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhsu_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_tumu( @@ -1155,7 +1155,7 @@ vint64m2_t test_vmulhsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_tumu( @@ -1164,7 +1164,7 @@ vint64m2_t test_vmulhsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhsu_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_tumu( @@ -1173,7 +1173,7 @@ vint64m4_t 
test_vmulhsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_tumu( @@ -1182,7 +1182,7 @@ vint64m4_t test_vmulhsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhsu_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_tumu( @@ -1191,7 +1191,7 @@ vint64m8_t test_vmulhsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_mu( @@ -1200,7 +1200,7 @@ vint64m8_t test_vmulhsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - 
return vmulhsu_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_mu( @@ -1218,7 +1218,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_mu( @@ -1227,7 +1227,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_mu( @@ -1236,7 +1236,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_mu( @@ -1245,7 +1245,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_mu( @@ -1254,7 +1254,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_mu(vbool16_t mask, 
vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_mu( @@ -1263,7 +1263,7 @@ vint8m1_t test_vmulhsu_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_mu( @@ -1272,7 +1272,7 @@ vint8m1_t test_vmulhsu_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_mu( @@ -1281,7 +1281,7 @@ vint8m2_t test_vmulhsu_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_mu( @@ -1290,7 +1290,7 @@ vint8m2_t test_vmulhsu_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m4_mu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_mu( @@ -1299,7 +1299,7 @@ vint8m4_t test_vmulhsu_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_mu( @@ -1308,7 +1308,7 @@ vint8m4_t test_vmulhsu_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhsu_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_mu( @@ -1317,7 +1317,7 @@ vint8m8_t test_vmulhsu_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_mu( @@ -1326,7 +1326,7 @@ vint8m8_t test_vmulhsu_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_mu( @@ -1335,7 +1335,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4_mu(vbool64_t mask, 
vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_mu( @@ -1344,7 +1344,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_mu( @@ -1353,7 +1353,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_mu( @@ -1362,7 +1362,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhsu_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_mu( @@ -1371,7 +1371,7 @@ vint16m1_t test_vmulhsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmulhsu_vv_i16m2_mu( @@ -1380,7 +1380,7 @@ vint16m1_t test_vmulhsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_mu( @@ -1389,7 +1389,7 @@ vint16m2_t test_vmulhsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_mu( @@ -1398,7 +1398,7 @@ vint16m2_t test_vmulhsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_mu( @@ -1407,7 +1407,7 @@ vint16m4_t test_vmulhsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_mu( @@ -1416,7 +1416,7 @@ vint16m4_t test_vmulhsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t 
op1, vuint16m8_t op2, size_t vl) { - return vmulhsu_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_mu( @@ -1425,7 +1425,7 @@ vint16m8_t test_vmulhsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_mu( @@ -1434,7 +1434,7 @@ vint16m8_t test_vmulhsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhsu_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_mu( @@ -1443,7 +1443,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_mu( @@ -1452,7 +1452,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhsu_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_mu( @@ -1461,7 +1461,7 
@@ vint32m1_t test_vmulhsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_mu( @@ -1470,7 +1470,7 @@ vint32m1_t test_vmulhsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhsu_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_mu( @@ -1479,7 +1479,7 @@ vint32m2_t test_vmulhsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_mu( @@ -1488,7 +1488,7 @@ vint32m2_t test_vmulhsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhsu_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_mu( @@ -1497,7 +1497,7 @@ vint32m4_t test_vmulhsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return 
vmulhsu_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_mu( @@ -1506,7 +1506,7 @@ vint32m4_t test_vmulhsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhsu_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_mu( @@ -1515,7 +1515,7 @@ vint32m8_t test_vmulhsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_mu( @@ -1524,7 +1524,7 @@ vint32m8_t test_vmulhsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhsu_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_mu( @@ -1533,7 +1533,7 @@ vint64m1_t test_vmulhsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_mu( @@ -1542,7 +1542,7 @@ vint64m1_t test_vmulhsu_vx_i64m1_mu(vbool64_t mask, 
vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhsu_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_mu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vmulhsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_mu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vmulhsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhsu_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_mu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vmulhsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_mu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vmulhsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhsu_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vmulhsu_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_mu( @@ -1587,6 +1587,6 @@ vint64m8_t test_vmulhsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmulhu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmulhu.c index 2f609db9ed89..245f6db4fd4d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmulhu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmulhu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_tu( @@ -30,7 +30,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_tu( @@ -39,7 +39,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_tu( @@ -48,7 +48,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_tu( @@ -57,7 +57,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_tu( @@ -66,7 +66,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vmulhu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m1_tu(maskedoff, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_tu( @@ -84,7 +84,7 @@ vuint8m1_t test_vmulhu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_tu( @@ -93,7 +93,7 @@ vuint8m2_t test_vmulhu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_tu( @@ -102,7 +102,7 @@ vuint8m2_t test_vmulhu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_tu( @@ -111,7 +111,7 @@ vuint8m4_t test_vmulhu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vmulhu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhu_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m8_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_tu( @@ -129,7 +129,7 @@ vuint8m8_t test_vmulhu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_tu( @@ -138,7 +138,7 @@ vuint8m8_t test_vmulhu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_tu( @@ -147,7 +147,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_tu( @@ -156,7 +156,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_tu( @@ -165,7 +165,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return 
__riscv_vmulhu_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_tu( @@ -174,7 +174,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_tu( @@ -183,7 +183,7 @@ vuint16m1_t test_vmulhu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_tu( @@ -192,7 +192,7 @@ vuint16m1_t test_vmulhu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_tu( @@ -201,7 +201,7 @@ vuint16m2_t test_vmulhu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_tu( @@ -210,7 +210,7 @@ vuint16m2_t test_vmulhu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return 
vmulhu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_tu( @@ -219,7 +219,7 @@ vuint16m4_t test_vmulhu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_tu( @@ -228,7 +228,7 @@ vuint16m4_t test_vmulhu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_tu( @@ -237,7 +237,7 @@ vuint16m8_t test_vmulhu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tu( @@ -246,7 +246,7 @@ vuint16m8_t test_vmulhu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tu( @@ -255,7 +255,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2_tu(vuint32mf2_t maskedoff, 
vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_tu( @@ -264,7 +264,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_tu( @@ -273,7 +273,7 @@ vuint32m1_t test_vmulhu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_tu( @@ -282,7 +282,7 @@ vuint32m1_t test_vmulhu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_tu( @@ -291,7 +291,7 @@ vuint32m2_t test_vmulhu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_tu( @@ -300,7 +300,7 @@ vuint32m2_t test_vmulhu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vmulhu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_tu( @@ -309,7 +309,7 @@ vuint32m4_t test_vmulhu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_tu( @@ -318,7 +318,7 @@ vuint32m4_t test_vmulhu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_tu( @@ -327,7 +327,7 @@ vuint32m8_t test_vmulhu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_tu( @@ -336,7 +336,7 @@ vuint32m8_t test_vmulhu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_tu( @@ -345,7 +345,7 @@ vuint64m1_t test_vmulhu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_tu( @@ -354,7 +354,7 @@ vuint64m1_t test_vmulhu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_tu( @@ -363,7 +363,7 @@ vuint64m2_t test_vmulhu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_tu( @@ -372,7 +372,7 @@ vuint64m2_t test_vmulhu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_tu( @@ -381,7 +381,7 @@ vuint64m4_t test_vmulhu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_tu( @@ -390,7 +390,7 @@ vuint64m4_t test_vmulhu_vx_u64m4_tu(vuint64m4_t 
maskedoff, vuint64m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m8_t test_vmulhu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t test_vmulhu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_tum( @@ -417,7 +417,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_tum( @@ -426,7 +426,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf4_tum(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_tum( @@ -435,7 +435,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_tum( @@ -444,7 +444,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_tum( @@ -453,7 +453,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_tum( @@ -462,7 +462,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_tum( @@ -471,7 +471,7 @@ vuint8m1_t test_vmulhu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1_tum(vbool8_t mask, 
vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_tum( @@ -480,7 +480,7 @@ vuint8m1_t test_vmulhu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_tum( @@ -489,7 +489,7 @@ vuint8m2_t test_vmulhu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_tum( @@ -498,7 +498,7 @@ vuint8m2_t test_vmulhu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_tum( @@ -507,7 +507,7 @@ vuint8m4_t test_vmulhu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_tum( @@ -516,7 +516,7 @@ vuint8m4_t 
test_vmulhu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_tum( @@ -525,7 +525,7 @@ vuint8m8_t test_vmulhu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_tum( @@ -534,7 +534,7 @@ vuint8m8_t test_vmulhu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_tum( @@ -543,7 +543,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_tum( @@ -552,7 +552,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return 
vmulhu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_tum( @@ -561,7 +561,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_tum( @@ -570,7 +570,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_tum( @@ -579,7 +579,7 @@ vuint16m1_t test_vmulhu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_tum( @@ -588,7 +588,7 @@ vuint16m1_t test_vmulhu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_tum( @@ -597,7 +597,7 @@ vuint16m2_t test_vmulhu_vv_u16m2_tum(vbool8_t 
mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_tum( @@ -606,7 +606,7 @@ vuint16m2_t test_vmulhu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_tum( @@ -615,7 +615,7 @@ vuint16m4_t test_vmulhu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_tum( @@ -624,7 +624,7 @@ vuint16m4_t test_vmulhu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_tum( @@ -633,7 +633,7 @@ vuint16m8_t test_vmulhu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m8_tum(mask, maskedoff, op1, op2, 
vl); + return __riscv_vmulhu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tum( @@ -642,7 +642,7 @@ vuint16m8_t test_vmulhu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_tum( @@ -651,7 +651,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_tum( @@ -660,7 +660,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_tum( @@ -669,7 +669,7 @@ vuint32m1_t test_vmulhu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_tum( @@ -678,7 +678,7 @@ vuint32m1_t test_vmulhu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_tum( @@ -687,7 +687,7 @@ vuint32m2_t test_vmulhu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_tum( @@ -696,7 +696,7 @@ vuint32m2_t test_vmulhu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_tum( @@ -705,7 +705,7 @@ vuint32m4_t test_vmulhu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_tum( @@ -714,7 +714,7 @@ vuint32m4_t test_vmulhu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vmulhu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_tum( @@ -723,7 +723,7 @@ vuint32m8_t test_vmulhu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_tum( @@ -732,7 +732,7 @@ vuint32m8_t test_vmulhu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_tum( @@ -741,7 +741,7 @@ vuint64m1_t test_vmulhu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_tum( @@ -750,7 +750,7 @@ vuint64m1_t test_vmulhu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_tum( @@ -759,7 +759,7 @@ vuint64m2_t test_vmulhu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m2_t test_vmulhu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_tum( @@ -768,7 +768,7 @@ vuint64m2_t test_vmulhu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_tum( @@ -777,7 +777,7 @@ vuint64m4_t test_vmulhu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_tum( @@ -786,7 +786,7 @@ vuint64m4_t test_vmulhu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m8_t test_vmulhu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m8_tum(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vmulhu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_tumu( @@ -813,7 +813,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_tumu( @@ -822,7 +822,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_tumu( @@ -831,7 +831,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_tumu( @@ -840,7 +840,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vmulhu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_tumu( @@ -849,7 +849,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_tumu( @@ -858,7 +858,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_tumu( @@ -867,7 +867,7 @@ vuint8m1_t test_vmulhu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_tumu( @@ -876,7 +876,7 @@ vuint8m1_t test_vmulhu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_tumu( @@ -885,7 +885,7 @@ vuint8m2_t test_vmulhu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_tumu( @@ -894,7 +894,7 @@ vuint8m2_t test_vmulhu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_tumu( @@ -903,7 +903,7 @@ vuint8m4_t test_vmulhu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_tumu( @@ -912,7 +912,7 @@ vuint8m4_t test_vmulhu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_tumu( @@ -921,7 +921,7 @@ vuint8m8_t test_vmulhu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t 
op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_tumu( @@ -930,7 +930,7 @@ vuint8m8_t test_vmulhu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_tumu( @@ -939,7 +939,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_tumu( @@ -948,7 +948,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_tumu( @@ -957,7 +957,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmulhu_vv_u16m1_tumu( @@ -966,7 +966,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_tumu( @@ -975,7 +975,7 @@ vuint16m1_t test_vmulhu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_tumu( @@ -984,7 +984,7 @@ vuint16m1_t test_vmulhu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_tumu( @@ -993,7 +993,7 @@ vuint16m2_t test_vmulhu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_tumu( @@ -1002,7 +1002,7 @@ vuint16m2_t test_vmulhu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t 
maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_tumu( @@ -1011,7 +1011,7 @@ vuint16m4_t test_vmulhu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_tumu( @@ -1020,7 +1020,7 @@ vuint16m4_t test_vmulhu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_tumu( @@ -1029,7 +1029,7 @@ vuint16m8_t test_vmulhu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_tumu( @@ -1038,7 +1038,7 @@ vuint16m8_t test_vmulhu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmulhu_vx_u32mf2_tumu( @@ -1047,7 +1047,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_tumu( @@ -1056,7 +1056,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_tumu( @@ -1065,7 +1065,7 @@ vuint32m1_t test_vmulhu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_tumu( @@ -1074,7 +1074,7 @@ vuint32m1_t test_vmulhu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_tumu( @@ -1083,7 +1083,7 @@ vuint32m2_t test_vmulhu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2_tumu(vbool16_t 
mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_tumu( @@ -1092,7 +1092,7 @@ vuint32m2_t test_vmulhu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_tumu( @@ -1101,7 +1101,7 @@ vuint32m4_t test_vmulhu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_tumu( @@ -1110,7 +1110,7 @@ vuint32m4_t test_vmulhu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_tumu( @@ -1119,7 +1119,7 @@ vuint32m8_t test_vmulhu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_tumu( @@ -1128,7 +1128,7 @@ vuint32m8_t test_vmulhu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_tumu( @@ -1137,7 +1137,7 @@ vuint64m1_t test_vmulhu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_tumu( @@ -1146,7 +1146,7 @@ vuint64m1_t test_vmulhu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_tumu( @@ -1155,7 +1155,7 @@ vuint64m2_t test_vmulhu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_tumu( @@ -1164,7 +1164,7 @@ vuint64m2_t test_vmulhu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vmulhu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_tumu( @@ -1173,7 +1173,7 @@ vuint64m4_t test_vmulhu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_tumu( @@ -1182,7 +1182,7 @@ vuint64m4_t test_vmulhu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m8_t test_vmulhu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t test_vmulhu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf8_mu(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_mu( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_mu( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_mu( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_mu( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_mu( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2_mu(vbool16_t mask, 
vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_mu( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_mu( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vmulhu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_mu( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vmulhu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_mu( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vmulhu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_mu( @@ -1290,7 +1290,7 @@ vuint8m2_t 
test_vmulhu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_mu( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vmulhu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_mu( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vmulhu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_mu( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vmulhu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_mu( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vmulhu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu_vv_u16mf4_mu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vmulhu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_mu( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_mu( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_mu( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_mu( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_mu( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vmulhu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_mu( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vmulhu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_mu( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vmulhu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_mu( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vmulhu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_mu( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vmulhu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m4_mu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_mu( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vmulhu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_mu( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vmulhu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_mu( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vmulhu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_mu( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_mu( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vmulhu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_mu( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vmulhu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_mu( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vmulhu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_mu( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vmulhu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_mu( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vmulhu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_mu( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vmulhu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_mu( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vmulhu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_mu( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vmulhu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_mu( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vmulhu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_mu( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vmulhu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t 
maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_mu( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vmulhu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_mu( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vmulhu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_mu( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vmulhu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_mu( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vmulhu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_mu( @@ -1578,7 +1578,7 @@ 
vuint64m4_t test_vmulhu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vmulhu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vmulhu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmv.c index f75a7b349a9c..f3431a700796 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmv.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmv_v_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t vl) { - return vmv_v_v_i8mf8_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i8mf8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf8_tu( @@ -22,7 +22,7 @@ vint8mf8_t test_vmv_v_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmv_v_x_i8mf8_tu(vint8mf8_t maskedoff, int8_t src, size_t vl) { - return vmv_v_x_i8mf8_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i8mf8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4_tu( @@ -31,7 +31,7 @@ vint8mf8_t test_vmv_v_x_i8mf8_tu(vint8mf8_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t 
test_vmv_v_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t vl) { - return vmv_v_v_i8mf4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i8mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf4_tu( @@ -40,7 +40,7 @@ vint8mf4_t test_vmv_v_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmv_v_x_i8mf4_tu(vint8mf4_t maskedoff, int8_t src, size_t vl) { - return vmv_v_x_i8mf4_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i8mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2_tu( @@ -49,7 +49,7 @@ vint8mf4_t test_vmv_v_x_i8mf4_tu(vint8mf4_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmv_v_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t vl) { - return vmv_v_v_i8mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i8mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8mf2_tu( @@ -58,7 +58,7 @@ vint8mf2_t test_vmv_v_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmv_v_x_i8mf2_tu(vint8mf2_t maskedoff, int8_t src, size_t vl) { - return vmv_v_x_i8mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i8mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i8m1_tu( @@ -67,7 +67,7 @@ vint8mf2_t test_vmv_v_x_i8mf2_tu(vint8mf2_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmv_v_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t vl) { - return vmv_v_v_i8m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i8m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8m1_tu( @@ -76,7 +76,7 @@ vint8m1_t test_vmv_v_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmv_v_x_i8m1_tu(vint8m1_t maskedoff, int8_t src, size_t vl) { - return vmv_v_x_i8m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i8m1_tu(maskedoff, src, vl); } // 
CHECK-RV64-LABEL: @test_vmv_v_v_i8m2_tu( @@ -85,7 +85,7 @@ vint8m1_t test_vmv_v_x_i8m1_tu(vint8m1_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmv_v_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t vl) { - return vmv_v_v_i8m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i8m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8m2_tu( @@ -94,7 +94,7 @@ vint8m2_t test_vmv_v_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmv_v_x_i8m2_tu(vint8m2_t maskedoff, int8_t src, size_t vl) { - return vmv_v_x_i8m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i8m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i8m4_tu( @@ -103,7 +103,7 @@ vint8m2_t test_vmv_v_x_i8m2_tu(vint8m2_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmv_v_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t vl) { - return vmv_v_v_i8m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i8m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8m4_tu( @@ -112,7 +112,7 @@ vint8m4_t test_vmv_v_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmv_v_x_i8m4_tu(vint8m4_t maskedoff, int8_t src, size_t vl) { - return vmv_v_x_i8m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i8m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i8m8_tu( @@ -121,7 +121,7 @@ vint8m4_t test_vmv_v_x_i8m4_tu(vint8m4_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmv_v_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t vl) { - return vmv_v_v_i8m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i8m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i8m8_tu( @@ -130,7 +130,7 @@ vint8m8_t test_vmv_v_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t 
test_vmv_v_x_i8m8_tu(vint8m8_t maskedoff, int8_t src, size_t vl) { - return vmv_v_x_i8m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i8m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4_tu( @@ -139,7 +139,7 @@ vint8m8_t test_vmv_v_x_i8m8_tu(vint8m8_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmv_v_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return vmv_v_v_i16mf4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i16mf4_tu( @@ -148,7 +148,7 @@ vint16mf4_t test_vmv_v_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmv_v_x_i16mf4_tu(vint16mf4_t maskedoff, int16_t src, size_t vl) { - return vmv_v_x_i16mf4_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2_tu( @@ -157,7 +157,7 @@ vint16mf4_t test_vmv_v_x_i16mf4_tu(vint16mf4_t maskedoff, int16_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmv_v_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return vmv_v_v_i16mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i16mf2_tu( @@ -166,7 +166,7 @@ vint16mf2_t test_vmv_v_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmv_v_x_i16mf2_tu(vint16mf2_t maskedoff, int16_t src, size_t vl) { - return vmv_v_x_i16mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i16m1_tu( @@ -175,7 +175,7 @@ vint16mf2_t test_vmv_v_x_i16mf2_tu(vint16mf2_t maskedoff, int16_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmv_v_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t vl) { - return vmv_v_v_i16m1_tu(maskedoff, src, vl); + return 
__riscv_vmv_v_v_i16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i16m1_tu( @@ -184,7 +184,7 @@ vint16m1_t test_vmv_v_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmv_v_x_i16m1_tu(vint16m1_t maskedoff, int16_t src, size_t vl) { - return vmv_v_x_i16m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i16m2_tu( @@ -193,7 +193,7 @@ vint16m1_t test_vmv_v_x_i16m1_tu(vint16m1_t maskedoff, int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmv_v_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t vl) { - return vmv_v_v_i16m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i16m2_tu( @@ -202,7 +202,7 @@ vint16m2_t test_vmv_v_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmv_v_x_i16m2_tu(vint16m2_t maskedoff, int16_t src, size_t vl) { - return vmv_v_x_i16m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i16m4_tu( @@ -211,7 +211,7 @@ vint16m2_t test_vmv_v_x_i16m2_tu(vint16m2_t maskedoff, int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmv_v_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t vl) { - return vmv_v_v_i16m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i16m4_tu( @@ -220,7 +220,7 @@ vint16m4_t test_vmv_v_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmv_v_x_i16m4_tu(vint16m4_t maskedoff, int16_t src, size_t vl) { - return vmv_v_x_i16m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i16m8_tu( @@ -229,7 +229,7 @@ vint16m4_t test_vmv_v_x_i16m4_tu(vint16m4_t 
maskedoff, int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmv_v_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t vl) { - return vmv_v_v_i16m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i16m8_tu( @@ -238,7 +238,7 @@ vint16m8_t test_vmv_v_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmv_v_x_i16m8_tu(vint16m8_t maskedoff, int16_t src, size_t vl) { - return vmv_v_x_i16m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2_tu( @@ -247,7 +247,7 @@ vint16m8_t test_vmv_v_x_i16m8_tu(vint16m8_t maskedoff, int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmv_v_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return vmv_v_v_i32mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2_tu( @@ -256,7 +256,7 @@ vint32mf2_t test_vmv_v_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmv_v_x_i32mf2_tu(vint32mf2_t maskedoff, int32_t src, size_t vl) { - return vmv_v_x_i32mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i32m1_tu( @@ -265,7 +265,7 @@ vint32mf2_t test_vmv_v_x_i32mf2_tu(vint32mf2_t maskedoff, int32_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmv_v_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t vl) { - return vmv_v_v_i32m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i32m1_tu( @@ -274,7 +274,7 @@ vint32m1_t test_vmv_v_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmv_v_x_i32m1_tu(vint32m1_t maskedoff, int32_t src, 
size_t vl) { - return vmv_v_x_i32m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i32m2_tu( @@ -283,7 +283,7 @@ vint32m1_t test_vmv_v_x_i32m1_tu(vint32m1_t maskedoff, int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmv_v_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t vl) { - return vmv_v_v_i32m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i32m2_tu( @@ -292,7 +292,7 @@ vint32m2_t test_vmv_v_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmv_v_x_i32m2_tu(vint32m2_t maskedoff, int32_t src, size_t vl) { - return vmv_v_x_i32m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i32m4_tu( @@ -301,7 +301,7 @@ vint32m2_t test_vmv_v_x_i32m2_tu(vint32m2_t maskedoff, int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmv_v_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t vl) { - return vmv_v_v_i32m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i32m4_tu( @@ -310,7 +310,7 @@ vint32m4_t test_vmv_v_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmv_v_x_i32m4_tu(vint32m4_t maskedoff, int32_t src, size_t vl) { - return vmv_v_x_i32m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i32m8_tu( @@ -319,7 +319,7 @@ vint32m4_t test_vmv_v_x_i32m4_tu(vint32m4_t maskedoff, int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmv_v_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t vl) { - return vmv_v_v_i32m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vmv_v_x_i32m8_tu( @@ -328,7 +328,7 @@ vint32m8_t test_vmv_v_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmv_v_x_i32m8_tu(vint32m8_t maskedoff, int32_t src, size_t vl) { - return vmv_v_x_i32m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i64m1_tu( @@ -337,7 +337,7 @@ vint32m8_t test_vmv_v_x_i32m8_tu(vint32m8_t maskedoff, int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmv_v_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t vl) { - return vmv_v_v_i64m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i64m1_tu( @@ -346,7 +346,7 @@ vint64m1_t test_vmv_v_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmv_v_x_i64m1_tu(vint64m1_t maskedoff, int64_t src, size_t vl) { - return vmv_v_x_i64m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i64m2_tu( @@ -355,7 +355,7 @@ vint64m1_t test_vmv_v_x_i64m1_tu(vint64m1_t maskedoff, int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmv_v_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t vl) { - return vmv_v_v_i64m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i64m2_tu( @@ -364,7 +364,7 @@ vint64m2_t test_vmv_v_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmv_v_x_i64m2_tu(vint64m2_t maskedoff, int64_t src, size_t vl) { - return vmv_v_x_i64m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i64m4_tu( @@ -373,7 +373,7 @@ vint64m2_t test_vmv_v_x_i64m2_tu(vint64m2_t maskedoff, int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint64m4_t test_vmv_v_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t vl) { - return vmv_v_v_i64m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i64m4_tu( @@ -382,7 +382,7 @@ vint64m4_t test_vmv_v_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmv_v_x_i64m4_tu(vint64m4_t maskedoff, int64_t src, size_t vl) { - return vmv_v_x_i64m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_i64m8_tu( @@ -391,7 +391,7 @@ vint64m4_t test_vmv_v_x_i64m4_tu(vint64m4_t maskedoff, int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmv_v_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t vl) { - return vmv_v_v_i64m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_i64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_i64m8_tu( @@ -400,7 +400,7 @@ vint64m8_t test_vmv_v_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmv_v_x_i64m8_tu(vint64m8_t maskedoff, int64_t src, size_t vl) { - return vmv_v_x_i64m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_i64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8_tu( @@ -409,7 +409,7 @@ vint64m8_t test_vmv_v_x_i64m8_tu(vint64m8_t maskedoff, int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmv_v_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, size_t vl) { - return vmv_v_v_u8mf8_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u8mf8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf8_tu( @@ -418,7 +418,7 @@ vuint8mf8_t test_vmv_v_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmv_v_x_u8mf8_tu(vuint8mf8_t maskedoff, uint8_t src, size_t vl) { - return vmv_v_x_u8mf8_tu(maskedoff, src, vl); + return 
__riscv_vmv_v_x_u8mf8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4_tu( @@ -427,7 +427,7 @@ vuint8mf8_t test_vmv_v_x_u8mf8_tu(vuint8mf8_t maskedoff, uint8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmv_v_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t vl) { - return vmv_v_v_u8mf4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u8mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf4_tu( @@ -436,7 +436,7 @@ vuint8mf4_t test_vmv_v_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmv_v_x_u8mf4_tu(vuint8mf4_t maskedoff, uint8_t src, size_t vl) { - return vmv_v_x_u8mf4_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u8mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2_tu( @@ -445,7 +445,7 @@ vuint8mf4_t test_vmv_v_x_u8mf4_tu(vuint8mf4_t maskedoff, uint8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmv_v_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t vl) { - return vmv_v_v_u8mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u8mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8mf2_tu( @@ -454,7 +454,7 @@ vuint8mf2_t test_vmv_v_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmv_v_x_u8mf2_tu(vuint8mf2_t maskedoff, uint8_t src, size_t vl) { - return vmv_v_x_u8mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u8mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8m1_tu( @@ -463,7 +463,7 @@ vuint8mf2_t test_vmv_v_x_u8mf2_tu(vuint8mf2_t maskedoff, uint8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmv_v_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t vl) { - return vmv_v_v_u8m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u8m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8m1_tu( @@ -472,7 +472,7 @@ vuint8m1_t 
test_vmv_v_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmv_v_x_u8m1_tu(vuint8m1_t maskedoff, uint8_t src, size_t vl) { - return vmv_v_x_u8m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u8m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8m2_tu( @@ -481,7 +481,7 @@ vuint8m1_t test_vmv_v_x_u8m1_tu(vuint8m1_t maskedoff, uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmv_v_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t vl) { - return vmv_v_v_u8m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u8m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8m2_tu( @@ -490,7 +490,7 @@ vuint8m2_t test_vmv_v_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmv_v_x_u8m2_tu(vuint8m2_t maskedoff, uint8_t src, size_t vl) { - return vmv_v_x_u8m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u8m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8m4_tu( @@ -499,7 +499,7 @@ vuint8m2_t test_vmv_v_x_u8m2_tu(vuint8m2_t maskedoff, uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmv_v_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t vl) { - return vmv_v_v_u8m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u8m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8m4_tu( @@ -508,7 +508,7 @@ vuint8m4_t test_vmv_v_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmv_v_x_u8m4_tu(vuint8m4_t maskedoff, uint8_t src, size_t vl) { - return vmv_v_x_u8m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u8m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u8m8_tu( @@ -517,7 +517,7 @@ vuint8m4_t test_vmv_v_x_u8m4_tu(vuint8m4_t maskedoff, uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmv_v_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, 
size_t vl) { - return vmv_v_v_u8m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u8m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u8m8_tu( @@ -526,7 +526,7 @@ vuint8m8_t test_vmv_v_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmv_v_x_u8m8_tu(vuint8m8_t maskedoff, uint8_t src, size_t vl) { - return vmv_v_x_u8m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u8m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4_tu( @@ -535,7 +535,7 @@ vuint8m8_t test_vmv_v_x_u8m8_tu(vuint8m8_t maskedoff, uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmv_v_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return vmv_v_v_u16mf4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u16mf4_tu( @@ -544,7 +544,7 @@ vuint16mf4_t test_vmv_v_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmv_v_x_u16mf4_tu(vuint16mf4_t maskedoff, uint16_t src, size_t vl) { - return vmv_v_x_u16mf4_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2_tu( @@ -553,7 +553,7 @@ vuint16mf4_t test_vmv_v_x_u16mf4_tu(vuint16mf4_t maskedoff, uint16_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmv_v_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return vmv_v_v_u16mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u16mf2_tu( @@ -562,7 +562,7 @@ vuint16mf2_t test_vmv_v_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmv_v_x_u16mf2_tu(vuint16mf2_t maskedoff, uint16_t src, size_t vl) { - return vmv_v_x_u16mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u16mf2_tu(maskedoff, src, vl); } // 
CHECK-RV64-LABEL: @test_vmv_v_v_u16m1_tu( @@ -571,7 +571,7 @@ vuint16mf2_t test_vmv_v_x_u16mf2_tu(vuint16mf2_t maskedoff, uint16_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmv_v_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return vmv_v_v_u16m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u16m1_tu( @@ -580,7 +580,7 @@ vuint16m1_t test_vmv_v_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmv_v_x_u16m1_tu(vuint16m1_t maskedoff, uint16_t src, size_t vl) { - return vmv_v_x_u16m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u16m2_tu( @@ -589,7 +589,7 @@ vuint16m1_t test_vmv_v_x_u16m1_tu(vuint16m1_t maskedoff, uint16_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmv_v_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return vmv_v_v_u16m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u16m2_tu( @@ -598,7 +598,7 @@ vuint16m2_t test_vmv_v_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmv_v_x_u16m2_tu(vuint16m2_t maskedoff, uint16_t src, size_t vl) { - return vmv_v_x_u16m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u16m4_tu( @@ -607,7 +607,7 @@ vuint16m2_t test_vmv_v_x_u16m2_tu(vuint16m2_t maskedoff, uint16_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmv_v_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return vmv_v_v_u16m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u16m4_tu( @@ -616,7 +616,7 @@ vuint16m4_t test_vmv_v_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, 
size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmv_v_x_u16m4_tu(vuint16m4_t maskedoff, uint16_t src, size_t vl) { - return vmv_v_x_u16m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u16m8_tu( @@ -625,7 +625,7 @@ vuint16m4_t test_vmv_v_x_u16m4_tu(vuint16m4_t maskedoff, uint16_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmv_v_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return vmv_v_v_u16m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u16m8_tu( @@ -634,7 +634,7 @@ vuint16m8_t test_vmv_v_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmv_v_x_u16m8_tu(vuint16m8_t maskedoff, uint16_t src, size_t vl) { - return vmv_v_x_u16m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2_tu( @@ -643,7 +643,7 @@ vuint16m8_t test_vmv_v_x_u16m8_tu(vuint16m8_t maskedoff, uint16_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmv_v_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return vmv_v_v_u32mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2_tu( @@ -652,7 +652,7 @@ vuint32mf2_t test_vmv_v_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmv_v_x_u32mf2_tu(vuint32mf2_t maskedoff, uint32_t src, size_t vl) { - return vmv_v_x_u32mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u32m1_tu( @@ -661,7 +661,7 @@ vuint32mf2_t test_vmv_v_x_u32mf2_tu(vuint32mf2_t maskedoff, uint32_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmv_v_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t vl) { 
- return vmv_v_v_u32m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u32m1_tu( @@ -670,7 +670,7 @@ vuint32m1_t test_vmv_v_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmv_v_x_u32m1_tu(vuint32m1_t maskedoff, uint32_t src, size_t vl) { - return vmv_v_x_u32m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u32m2_tu( @@ -679,7 +679,7 @@ vuint32m1_t test_vmv_v_x_u32m1_tu(vuint32m1_t maskedoff, uint32_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmv_v_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return vmv_v_v_u32m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u32m2_tu( @@ -688,7 +688,7 @@ vuint32m2_t test_vmv_v_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmv_v_x_u32m2_tu(vuint32m2_t maskedoff, uint32_t src, size_t vl) { - return vmv_v_x_u32m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u32m4_tu( @@ -697,7 +697,7 @@ vuint32m2_t test_vmv_v_x_u32m2_tu(vuint32m2_t maskedoff, uint32_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmv_v_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return vmv_v_v_u32m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u32m4_tu( @@ -706,7 +706,7 @@ vuint32m4_t test_vmv_v_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmv_v_x_u32m4_tu(vuint32m4_t maskedoff, uint32_t src, size_t vl) { - return vmv_v_x_u32m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vmv_v_v_u32m8_tu( @@ -715,7 +715,7 @@ vuint32m4_t test_vmv_v_x_u32m4_tu(vuint32m4_t maskedoff, uint32_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmv_v_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return vmv_v_v_u32m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u32m8_tu( @@ -724,7 +724,7 @@ vuint32m8_t test_vmv_v_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmv_v_x_u32m8_tu(vuint32m8_t maskedoff, uint32_t src, size_t vl) { - return vmv_v_x_u32m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u64m1_tu( @@ -733,7 +733,7 @@ vuint32m8_t test_vmv_v_x_u32m8_tu(vuint32m8_t maskedoff, uint32_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmv_v_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return vmv_v_v_u64m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u64m1_tu( @@ -742,7 +742,7 @@ vuint64m1_t test_vmv_v_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmv_v_x_u64m1_tu(vuint64m1_t maskedoff, uint64_t src, size_t vl) { - return vmv_v_x_u64m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u64m2_tu( @@ -751,7 +751,7 @@ vuint64m1_t test_vmv_v_x_u64m1_tu(vuint64m1_t maskedoff, uint64_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmv_v_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return vmv_v_v_u64m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u64m2_tu( @@ -760,7 +760,7 @@ vuint64m2_t test_vmv_v_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, size_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmv_v_x_u64m2_tu(vuint64m2_t maskedoff, uint64_t src, size_t vl) { - return vmv_v_x_u64m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u64m4_tu( @@ -769,7 +769,7 @@ vuint64m2_t test_vmv_v_x_u64m2_tu(vuint64m2_t maskedoff, uint64_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmv_v_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return vmv_v_v_u64m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u64m4_tu( @@ -778,7 +778,7 @@ vuint64m4_t test_vmv_v_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmv_v_x_u64m4_tu(vuint64m4_t maskedoff, uint64_t src, size_t vl) { - return vmv_v_x_u64m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_u64m8_tu( @@ -787,7 +787,7 @@ vuint64m4_t test_vmv_v_x_u64m4_tu(vuint64m4_t maskedoff, uint64_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmv_v_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return vmv_v_v_u64m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_u64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_x_u64m8_tu( @@ -796,7 +796,7 @@ vuint64m8_t test_vmv_v_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmv_v_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl) { - return vmv_v_x_u64m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_x_u64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4_tu( @@ -805,7 +805,7 @@ vuint64m8_t test_vmv_v_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vmv_v_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return 
vmv_v_v_f16mf4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2_tu( @@ -814,7 +814,7 @@ vfloat16mf4_t test_vmv_v_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vmv_v_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vmv_v_v_f16mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f16m1_tu( @@ -823,7 +823,7 @@ vfloat16mf2_t test_vmv_v_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vmv_v_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vmv_v_v_f16m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f16m2_tu( @@ -832,7 +832,7 @@ vfloat16m1_t test_vmv_v_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vmv_v_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vmv_v_v_f16m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f16m4_tu( @@ -841,7 +841,7 @@ vfloat16m2_t test_vmv_v_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vmv_v_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vmv_v_v_f16m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f16m8_tu( @@ -850,7 +850,7 @@ vfloat16m4_t test_vmv_v_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vmv_v_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vmv_v_v_f16m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f16m8_tu(maskedoff, src, vl); } // 
CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2_tu( @@ -859,7 +859,7 @@ vfloat16m8_t test_vmv_v_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vmv_v_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vmv_v_v_f32mf2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f32m1_tu( @@ -868,7 +868,7 @@ vfloat32mf2_t test_vmv_v_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vmv_v_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vmv_v_v_f32m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f32m2_tu( @@ -877,7 +877,7 @@ vfloat32m1_t test_vmv_v_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vmv_v_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vmv_v_v_f32m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f32m4_tu( @@ -886,7 +886,7 @@ vfloat32m2_t test_vmv_v_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vmv_v_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vmv_v_v_f32m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f32m8_tu( @@ -895,7 +895,7 @@ vfloat32m4_t test_vmv_v_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vmv_v_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vmv_v_v_f32m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f64m1_tu( @@ -904,7 +904,7 @@ vfloat32m8_t test_vmv_v_v_f32m8_tu(vfloat32m8_t 
maskedoff, vfloat32m8_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vmv_v_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vmv_v_v_f64m1_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f64m2_tu( @@ -913,7 +913,7 @@ vfloat64m1_t test_vmv_v_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vmv_v_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vmv_v_v_f64m2_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f64m4_tu( @@ -922,7 +922,7 @@ vfloat64m2_t test_vmv_v_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vmv_v_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vmv_v_v_f64m4_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_v_v_f64m8_tu( @@ -931,7 +931,7 @@ vfloat64m4_t test_vmv_v_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vmv_v_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vmv_v_v_f64m8_tu(maskedoff, src, vl); + return __riscv_vmv_v_v_f64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8_tu( @@ -940,7 +940,7 @@ vfloat64m8_t test_vmv_v_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmv_s_x_i8mf8_tu(vint8mf8_t maskedoff, int8_t src, size_t vl) { - return vmv_s_x_i8mf8_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i8mf8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4_tu( @@ -949,7 +949,7 @@ vint8mf8_t test_vmv_s_x_i8mf8_tu(vint8mf8_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmv_s_x_i8mf4_tu(vint8mf4_t maskedoff, int8_t 
src, size_t vl) { - return vmv_s_x_i8mf4_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i8mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2_tu( @@ -958,7 +958,7 @@ vint8mf4_t test_vmv_s_x_i8mf4_tu(vint8mf4_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmv_s_x_i8mf2_tu(vint8mf2_t maskedoff, int8_t src, size_t vl) { - return vmv_s_x_i8mf2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i8mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i8m1_tu( @@ -967,7 +967,7 @@ vint8mf2_t test_vmv_s_x_i8mf2_tu(vint8mf2_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmv_s_x_i8m1_tu(vint8m1_t maskedoff, int8_t src, size_t vl) { - return vmv_s_x_i8m1_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i8m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i8m2_tu( @@ -976,7 +976,7 @@ vint8m1_t test_vmv_s_x_i8m1_tu(vint8m1_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmv_s_x_i8m2_tu(vint8m2_t maskedoff, int8_t src, size_t vl) { - return vmv_s_x_i8m2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i8m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i8m4_tu( @@ -985,7 +985,7 @@ vint8m2_t test_vmv_s_x_i8m2_tu(vint8m2_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmv_s_x_i8m4_tu(vint8m4_t maskedoff, int8_t src, size_t vl) { - return vmv_s_x_i8m4_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i8m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i8m8_tu( @@ -994,7 +994,7 @@ vint8m4_t test_vmv_s_x_i8m4_tu(vint8m4_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmv_s_x_i8m8_tu(vint8m8_t maskedoff, int8_t src, size_t vl) { - return vmv_s_x_i8m8_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i8m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4_tu( @@ -1003,7 +1003,7 @@ vint8m8_t 
test_vmv_s_x_i8m8_tu(vint8m8_t maskedoff, int8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmv_s_x_i16mf4_tu(vint16mf4_t maskedoff, int16_t src, size_t vl) { - return vmv_s_x_i16mf4_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2_tu( @@ -1012,7 +1012,7 @@ vint16mf4_t test_vmv_s_x_i16mf4_tu(vint16mf4_t maskedoff, int16_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmv_s_x_i16mf2_tu(vint16mf2_t maskedoff, int16_t src, size_t vl) { - return vmv_s_x_i16mf2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16m1_tu( @@ -1021,7 +1021,7 @@ vint16mf2_t test_vmv_s_x_i16mf2_tu(vint16mf2_t maskedoff, int16_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmv_s_x_i16m1_tu(vint16m1_t maskedoff, int16_t src, size_t vl) { - return vmv_s_x_i16m1_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16m2_tu( @@ -1030,7 +1030,7 @@ vint16m1_t test_vmv_s_x_i16m1_tu(vint16m1_t maskedoff, int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmv_s_x_i16m2_tu(vint16m2_t maskedoff, int16_t src, size_t vl) { - return vmv_s_x_i16m2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16m4_tu( @@ -1039,7 +1039,7 @@ vint16m2_t test_vmv_s_x_i16m2_tu(vint16m2_t maskedoff, int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmv_s_x_i16m4_tu(vint16m4_t maskedoff, int16_t src, size_t vl) { - return vmv_s_x_i16m4_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16m8_tu( @@ -1048,7 +1048,7 @@ vint16m4_t test_vmv_s_x_i16m4_tu(vint16m4_t maskedoff, int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vmv_s_x_i16m8_tu(vint16m8_t maskedoff, int16_t src, size_t vl) { - return vmv_s_x_i16m8_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2_tu( @@ -1057,7 +1057,7 @@ vint16m8_t test_vmv_s_x_i16m8_tu(vint16m8_t maskedoff, int16_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmv_s_x_i32mf2_tu(vint32mf2_t maskedoff, int32_t src, size_t vl) { - return vmv_s_x_i32mf2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32m1_tu( @@ -1066,7 +1066,7 @@ vint32mf2_t test_vmv_s_x_i32mf2_tu(vint32mf2_t maskedoff, int32_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmv_s_x_i32m1_tu(vint32m1_t maskedoff, int32_t src, size_t vl) { - return vmv_s_x_i32m1_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32m2_tu( @@ -1075,7 +1075,7 @@ vint32m1_t test_vmv_s_x_i32m1_tu(vint32m1_t maskedoff, int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmv_s_x_i32m2_tu(vint32m2_t maskedoff, int32_t src, size_t vl) { - return vmv_s_x_i32m2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32m4_tu( @@ -1084,7 +1084,7 @@ vint32m2_t test_vmv_s_x_i32m2_tu(vint32m2_t maskedoff, int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmv_s_x_i32m4_tu(vint32m4_t maskedoff, int32_t src, size_t vl) { - return vmv_s_x_i32m4_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32m8_tu( @@ -1093,7 +1093,7 @@ vint32m4_t test_vmv_s_x_i32m4_tu(vint32m4_t maskedoff, int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmv_s_x_i32m8_tu(vint32m8_t maskedoff, int32_t src, size_t vl) { - return vmv_s_x_i32m8_tu(maskedoff, src, vl); + return 
__riscv_vmv_s_x_i32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i64m1_tu( @@ -1102,7 +1102,7 @@ vint32m8_t test_vmv_s_x_i32m8_tu(vint32m8_t maskedoff, int32_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmv_s_x_i64m1_tu(vint64m1_t maskedoff, int64_t src, size_t vl) { - return vmv_s_x_i64m1_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i64m2_tu( @@ -1111,7 +1111,7 @@ vint64m1_t test_vmv_s_x_i64m1_tu(vint64m1_t maskedoff, int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmv_s_x_i64m2_tu(vint64m2_t maskedoff, int64_t src, size_t vl) { - return vmv_s_x_i64m2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i64m4_tu( @@ -1120,7 +1120,7 @@ vint64m2_t test_vmv_s_x_i64m2_tu(vint64m2_t maskedoff, int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmv_s_x_i64m4_tu(vint64m4_t maskedoff, int64_t src, size_t vl) { - return vmv_s_x_i64m4_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_i64m8_tu( @@ -1129,7 +1129,7 @@ vint64m4_t test_vmv_s_x_i64m4_tu(vint64m4_t maskedoff, int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmv_s_x_i64m8_tu(vint64m8_t maskedoff, int64_t src, size_t vl) { - return vmv_s_x_i64m8_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_i64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8_tu( @@ -1138,7 +1138,7 @@ vint64m8_t test_vmv_s_x_i64m8_tu(vint64m8_t maskedoff, int64_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmv_s_x_u8mf8_tu(vuint8mf8_t maskedoff, uint8_t src, size_t vl) { - return vmv_s_x_u8mf8_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u8mf8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4_tu( @@ -1147,7 +1147,7 @@ vuint8mf8_t 
test_vmv_s_x_u8mf8_tu(vuint8mf8_t maskedoff, uint8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmv_s_x_u8mf4_tu(vuint8mf4_t maskedoff, uint8_t src, size_t vl) { - return vmv_s_x_u8mf4_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u8mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2_tu( @@ -1156,7 +1156,7 @@ vuint8mf4_t test_vmv_s_x_u8mf4_tu(vuint8mf4_t maskedoff, uint8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmv_s_x_u8mf2_tu(vuint8mf2_t maskedoff, uint8_t src, size_t vl) { - return vmv_s_x_u8mf2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u8mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u8m1_tu( @@ -1165,7 +1165,7 @@ vuint8mf2_t test_vmv_s_x_u8mf2_tu(vuint8mf2_t maskedoff, uint8_t src, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmv_s_x_u8m1_tu(vuint8m1_t maskedoff, uint8_t src, size_t vl) { - return vmv_s_x_u8m1_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u8m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u8m2_tu( @@ -1174,7 +1174,7 @@ vuint8m1_t test_vmv_s_x_u8m1_tu(vuint8m1_t maskedoff, uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmv_s_x_u8m2_tu(vuint8m2_t maskedoff, uint8_t src, size_t vl) { - return vmv_s_x_u8m2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u8m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u8m4_tu( @@ -1183,7 +1183,7 @@ vuint8m2_t test_vmv_s_x_u8m2_tu(vuint8m2_t maskedoff, uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmv_s_x_u8m4_tu(vuint8m4_t maskedoff, uint8_t src, size_t vl) { - return vmv_s_x_u8m4_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u8m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u8m8_tu( @@ -1192,7 +1192,7 @@ vuint8m4_t test_vmv_s_x_u8m4_tu(vuint8m4_t maskedoff, uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmv_s_x_u8m8_tu(vuint8m8_t maskedoff, uint8_t 
src, size_t vl) { - return vmv_s_x_u8m8_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u8m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4_tu( @@ -1201,7 +1201,7 @@ vuint8m8_t test_vmv_s_x_u8m8_tu(vuint8m8_t maskedoff, uint8_t src, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmv_s_x_u16mf4_tu(vuint16mf4_t maskedoff, uint16_t src, size_t vl) { - return vmv_s_x_u16mf4_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2_tu( @@ -1210,7 +1210,7 @@ vuint16mf4_t test_vmv_s_x_u16mf4_tu(vuint16mf4_t maskedoff, uint16_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmv_s_x_u16mf2_tu(vuint16mf2_t maskedoff, uint16_t src, size_t vl) { - return vmv_s_x_u16mf2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16m1_tu( @@ -1219,7 +1219,7 @@ vuint16mf2_t test_vmv_s_x_u16mf2_tu(vuint16mf2_t maskedoff, uint16_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmv_s_x_u16m1_tu(vuint16m1_t maskedoff, uint16_t src, size_t vl) { - return vmv_s_x_u16m1_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16m2_tu( @@ -1228,7 +1228,7 @@ vuint16m1_t test_vmv_s_x_u16m1_tu(vuint16m1_t maskedoff, uint16_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmv_s_x_u16m2_tu(vuint16m2_t maskedoff, uint16_t src, size_t vl) { - return vmv_s_x_u16m2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16m4_tu( @@ -1237,7 +1237,7 @@ vuint16m2_t test_vmv_s_x_u16m2_tu(vuint16m2_t maskedoff, uint16_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmv_s_x_u16m4_tu(vuint16m4_t maskedoff, uint16_t src, size_t vl) { - return vmv_s_x_u16m4_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u16m4_tu(maskedoff, src, vl); } // 
CHECK-RV64-LABEL: @test_vmv_s_x_u16m8_tu( @@ -1246,7 +1246,7 @@ vuint16m4_t test_vmv_s_x_u16m4_tu(vuint16m4_t maskedoff, uint16_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmv_s_x_u16m8_tu(vuint16m8_t maskedoff, uint16_t src, size_t vl) { - return vmv_s_x_u16m8_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2_tu( @@ -1255,7 +1255,7 @@ vuint16m8_t test_vmv_s_x_u16m8_tu(vuint16m8_t maskedoff, uint16_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmv_s_x_u32mf2_tu(vuint32mf2_t maskedoff, uint32_t src, size_t vl) { - return vmv_s_x_u32mf2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32m1_tu( @@ -1264,7 +1264,7 @@ vuint32mf2_t test_vmv_s_x_u32mf2_tu(vuint32mf2_t maskedoff, uint32_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmv_s_x_u32m1_tu(vuint32m1_t maskedoff, uint32_t src, size_t vl) { - return vmv_s_x_u32m1_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32m2_tu( @@ -1273,7 +1273,7 @@ vuint32m1_t test_vmv_s_x_u32m1_tu(vuint32m1_t maskedoff, uint32_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmv_s_x_u32m2_tu(vuint32m2_t maskedoff, uint32_t src, size_t vl) { - return vmv_s_x_u32m2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32m4_tu( @@ -1282,7 +1282,7 @@ vuint32m2_t test_vmv_s_x_u32m2_tu(vuint32m2_t maskedoff, uint32_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmv_s_x_u32m4_tu(vuint32m4_t maskedoff, uint32_t src, size_t vl) { - return vmv_s_x_u32m4_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32m8_tu( @@ -1291,7 +1291,7 @@ vuint32m4_t test_vmv_s_x_u32m4_tu(vuint32m4_t maskedoff, uint32_t 
src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmv_s_x_u32m8_tu(vuint32m8_t maskedoff, uint32_t src, size_t vl) { - return vmv_s_x_u32m8_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u64m1_tu( @@ -1300,7 +1300,7 @@ vuint32m8_t test_vmv_s_x_u32m8_tu(vuint32m8_t maskedoff, uint32_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmv_s_x_u64m1_tu(vuint64m1_t maskedoff, uint64_t src, size_t vl) { - return vmv_s_x_u64m1_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u64m2_tu( @@ -1309,7 +1309,7 @@ vuint64m1_t test_vmv_s_x_u64m1_tu(vuint64m1_t maskedoff, uint64_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmv_s_x_u64m2_tu(vuint64m2_t maskedoff, uint64_t src, size_t vl) { - return vmv_s_x_u64m2_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u64m4_tu( @@ -1318,7 +1318,7 @@ vuint64m2_t test_vmv_s_x_u64m2_tu(vuint64m2_t maskedoff, uint64_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmv_s_x_u64m4_tu(vuint64m4_t maskedoff, uint64_t src, size_t vl) { - return vmv_s_x_u64m4_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vmv_s_x_u64m8_tu( @@ -1327,6 +1327,6 @@ vuint64m4_t test_vmv_s_x_u64m4_tu(vuint64m4_t maskedoff, uint64_t src, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmv_s_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl) { - return vmv_s_x_u64m8_tu(maskedoff, src, vl); + return __riscv_vmv_s_x_u64m8_tu(maskedoff, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclip.c index 2fa7cd1eb9ee..586efbed5544 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclip.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclip_wv_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclip_wv_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return 
vnclip_wv_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclip_wv_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclip_wv_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - 
return vnclip_wx_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclip_wv_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclip_wv_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_tu( @@ -129,7 +129,7 @@ vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_tu( @@ -138,7 +138,7 @@ vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_tu(vint16mf2_t maskedoff, 
vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclip_wv_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_tu( @@ -147,7 +147,7 @@ vint16mf2_t test_vnclip_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_tu( @@ -156,7 +156,7 @@ vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclip_wv_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_tu( @@ -165,7 +165,7 @@ vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_tu( @@ -174,7 +174,7 @@ vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclip_wv_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_tu( @@ -183,7 +183,7 @@ vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_tu( @@ -192,7 +192,7 @@ vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclip_wv_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_tu( @@ -201,7 +201,7 @@ vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tu( @@ -210,7 +210,7 @@ vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclip_wv_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tu( @@ -219,7 +219,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_tu( @@ -228,7 +228,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t maskedoff, 
vint64m1_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclip_wv_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_tu( @@ -237,7 +237,7 @@ vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_tu( @@ -246,7 +246,7 @@ vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclip_wv_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_tu( @@ -255,7 +255,7 @@ vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_tu( @@ -264,7 +264,7 @@ vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclip_wv_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_tu( @@ -273,7 +273,7 @@ vint32m4_t 
test_vnclip_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_tum( @@ -282,7 +282,7 @@ vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclip_wv_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_tum( @@ -291,7 +291,7 @@ vint8mf8_t test_vnclip_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_tum( @@ -300,7 +300,7 @@ vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclip_wv_i8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_tum( @@ -309,7 +309,7 @@ vint8mf4_t test_vnclip_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf4_tum(mask, 
maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_tum( @@ -318,7 +318,7 @@ vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclip_wv_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_tum( @@ -327,7 +327,7 @@ vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_tum( @@ -336,7 +336,7 @@ vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclip_wv_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_tum( @@ -345,7 +345,7 @@ vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_tum( @@ -354,7 +354,7 @@ vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclip_wv_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_tum( @@ -363,7 +363,7 @@ vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_tum( @@ -372,7 +372,7 @@ vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclip_wv_i8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_tum( @@ -381,7 +381,7 @@ vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_tum( @@ -390,7 +390,7 @@ vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclip_wv_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnclip_wv_i16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_tum( @@ -399,7 +399,7 @@ vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_tum( @@ -408,7 +408,7 @@ vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclip_wv_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_tum( @@ -417,7 +417,7 @@ vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_tum( @@ -426,7 +426,7 @@ vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclip_wv_i16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_tum( @@ -435,7 +435,7 @@ vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_tum( @@ -444,7 +444,7 @@ vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclip_wv_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_tum( @@ -453,7 +453,7 @@ vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_tum( @@ -462,7 +462,7 @@ vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclip_wv_i16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_tum( @@ -471,7 +471,7 @@ vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m4_tum(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnclip_wx_i16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tum( @@ -480,7 +480,7 @@ vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclip_wv_i32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tum( @@ -489,7 +489,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_tum( @@ -498,7 +498,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclip_wv_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_tum( @@ -507,7 +507,7 @@ vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_tum( @@ -516,7 +516,7 @@ vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclip_wv_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_tum( @@ -525,7 +525,7 @@ vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_tum( @@ -534,7 +534,7 @@ vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclip_wv_i32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_tum( @@ -543,7 +543,7 @@ vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_tumu( @@ -552,7 +552,7 @@ vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclip_wv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnclip_wv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_tumu( @@ -561,7 +561,7 @@ vint8mf8_t test_vnclip_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_tumu( @@ -570,7 +570,7 @@ vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclip_wv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_tumu( @@ -579,7 +579,7 @@ vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_tumu( @@ -588,7 +588,7 @@ vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclip_wv_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_tumu( @@ -597,7 +597,7 @@ vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint1 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_tumu( @@ -606,7 +606,7 @@ vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclip_wv_i8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_tumu( @@ -615,7 +615,7 @@ vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_tumu( @@ -624,7 +624,7 @@ vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclip_wv_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_tumu( @@ -633,7 +633,7 @@ vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnclip_wx_i8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_tumu( @@ -642,7 +642,7 @@ vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclip_wv_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_tumu( @@ -651,7 +651,7 @@ vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_tumu( @@ -660,7 +660,7 @@ vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclip_wv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_tumu( @@ -669,7 +669,7 @@ vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_tumu( @@ -678,7 +678,7 @@ vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vi // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclip_wv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_tumu( @@ -687,7 +687,7 @@ vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_tumu( @@ -696,7 +696,7 @@ vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclip_wv_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_tumu( @@ -705,7 +705,7 @@ vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_tumu( @@ -714,7 +714,7 @@ vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclip_wv_i16m2_tumu(mask, maskedoff, op1, 
shift, vl); + return __riscv_vnclip_wv_i16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_tumu( @@ -723,7 +723,7 @@ vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_tumu( @@ -732,7 +732,7 @@ vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclip_wv_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_tumu( @@ -741,7 +741,7 @@ vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_tumu( @@ -750,7 +750,7 @@ vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclip_wv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_tumu( @@ -759,7 +759,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t mask, 
vint32mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_tumu( @@ -768,7 +768,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclip_wv_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_tumu( @@ -777,7 +777,7 @@ vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_tumu( @@ -786,7 +786,7 @@ vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclip_wv_i32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_tumu( @@ -795,7 +795,7 @@ vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m2_tumu(mask, 
maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_tumu( @@ -804,7 +804,7 @@ vint32m2_t test_vnclip_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclip_wv_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_tumu( @@ -813,7 +813,7 @@ vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_mu( @@ -822,7 +822,7 @@ vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclip_wv_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_mu( @@ -831,7 +831,7 @@ vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_mu( @@ -840,7 +840,7 @@ vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t 
maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclip_wv_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_mu( @@ -849,7 +849,7 @@ vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_mu( @@ -858,7 +858,7 @@ vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclip_wv_i8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_mu( @@ -867,7 +867,7 @@ vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_mu( @@ -876,7 +876,7 @@ vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclip_wv_i8m1_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnclip_wv_i8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_mu( @@ -885,7 +885,7 @@ vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_mu( @@ -894,7 +894,7 @@ vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclip_wv_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_mu( @@ -903,7 +903,7 @@ vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_mu( @@ -912,7 +912,7 @@ vint8m2_t test_vnclip_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclip_wv_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_mu( @@ -921,7 +921,7 @@ vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t 
mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_mu( @@ -930,7 +930,7 @@ vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclip_wv_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_mu( @@ -939,7 +939,7 @@ vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_mu( @@ -948,7 +948,7 @@ vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclip_wv_i16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_mu( @@ -957,7 +957,7 @@ vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vnclip_wv_i16m1_mu( @@ -966,7 +966,7 @@ vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclip_wv_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_mu( @@ -975,7 +975,7 @@ vint16m1_t test_vnclip_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_mu( @@ -984,7 +984,7 @@ vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclip_wv_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_mu( @@ -993,7 +993,7 @@ vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_mu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, 
vuint16m4_t shift, size_t vl) { - return vnclip_wv_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_mu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnclip_wx_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_mu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclip_wv_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_mu( @@ -1029,7 +1029,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_mu( @@ -1038,7 +1038,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclip_wv_i32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_mu( @@ -1047,7 +1047,7 
@@ vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_mu( @@ -1056,7 +1056,7 @@ vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclip_wv_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_mu( @@ -1065,7 +1065,7 @@ vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vnclip_wx_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_mu( @@ -1074,7 +1074,7 @@ vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclip_wv_i32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wv_i32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_mu( @@ -1083,6 +1083,6 @@ vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return 
vnclip_wx_i32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclip_wx_i32m4_mu(mask, maskedoff, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclipu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclipu.c index fd3a59ffec4a..824612703e9c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclipu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnclipu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclipu_wv_u8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_tu( @@ -30,7 +30,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclipu_wv_u8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_tu( @@ -39,7 +39,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf4_tu(maskedoff, op1, shift, vl); + 
return __riscv_vnclipu_wx_u8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_tu( @@ -48,7 +48,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclipu_wv_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_tu( @@ -57,7 +57,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_tu( @@ -66,7 +66,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclipu_wv_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_tu( @@ -84,7 +84,7 @@ vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - 
return vnclipu_wv_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_tu( @@ -93,7 +93,7 @@ vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_tu( @@ -102,7 +102,7 @@ vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclipu_wv_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_tu( @@ -111,7 +111,7 @@ vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclipu_wv_u16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_tu( @@ -129,7 +129,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vnclipu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_tu( @@ -138,7 +138,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclipu_wv_u16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_tu( @@ -147,7 +147,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_tu( @@ -156,7 +156,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclipu_wv_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_tu( @@ -165,7 +165,7 @@ vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_tu( @@ -174,7 +174,7 @@ vuint16m1_t 
test_vnclipu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclipu_wv_u16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_tu( @@ -183,7 +183,7 @@ vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_tu( @@ -192,7 +192,7 @@ vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclipu_wv_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_tu( @@ -201,7 +201,7 @@ vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tu( @@ -210,7 +210,7 @@ vuint16m4_t test_vnclipu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclipu_wv_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32mf2_tu(maskedoff, op1, shift, 
vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tu( @@ -219,7 +219,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_tu( @@ -228,7 +228,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclipu_wv_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_tu( @@ -237,7 +237,7 @@ vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_tu( @@ -246,7 +246,7 @@ vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclipu_wv_u32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_tu( @@ -255,7 +255,7 @@ vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return 
vnclipu_wx_u32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_tu( @@ -264,7 +264,7 @@ vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclipu_wv_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_tu( @@ -273,7 +273,7 @@ vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_tum( @@ -282,7 +282,7 @@ vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclipu_wv_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_tum( @@ -291,7 +291,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_tum( @@ -300,7 +300,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vui // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclipu_wv_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_tum( @@ -309,7 +309,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_tum( @@ -318,7 +318,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclipu_wv_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_tum( @@ -327,7 +327,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_tum( @@ -336,7 +336,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclipu_wv_u8m1_tum(mask, maskedoff, op1, 
shift, vl); + return __riscv_vnclipu_wv_u8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_tum( @@ -345,7 +345,7 @@ vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_tum( @@ -354,7 +354,7 @@ vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclipu_wv_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_tum( @@ -363,7 +363,7 @@ vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_tum( @@ -372,7 +372,7 @@ vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclipu_wv_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_tum( @@ -381,7 +381,7 @@ vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_tum( @@ -390,7 +390,7 @@ vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclipu_wv_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_tum( @@ -399,7 +399,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_tum( @@ -408,7 +408,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclipu_wv_u16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_tum( @@ -417,7 +417,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf2_tum(mask, 
maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_tum( @@ -426,7 +426,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclipu_wv_u16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_tum( @@ -435,7 +435,7 @@ vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_tum( @@ -444,7 +444,7 @@ vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclipu_wv_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_tum( @@ -453,7 +453,7 @@ vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_tum( @@ -462,7 +462,7 @@ vuint16m2_t 
test_vnclipu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclipu_wv_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_tum( @@ -471,7 +471,7 @@ vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tum( @@ -480,7 +480,7 @@ vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclipu_wv_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tum( @@ -489,7 +489,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_tum( @@ -498,7 +498,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, 
vuint32m1_t shift, size_t vl) { - return vnclipu_wv_u32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_tum( @@ -507,7 +507,7 @@ vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_tum( @@ -516,7 +516,7 @@ vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclipu_wv_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_tum( @@ -525,7 +525,7 @@ vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_tum( @@ -534,7 +534,7 @@ vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclipu_wv_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vnclipu_wx_u32m4_tum( @@ -543,7 +543,7 @@ vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_tumu( @@ -552,7 +552,7 @@ vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclipu_wv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_tumu( @@ -561,7 +561,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_tumu( @@ -570,7 +570,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclipu_wv_u8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_tumu( @@ -579,7 +579,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vnclipu_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_tumu( @@ -588,7 +588,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclipu_wv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_tumu( @@ -597,7 +597,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_tumu( @@ -606,7 +606,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclipu_wv_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_tumu( @@ -615,7 +615,7 @@ vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnclipu_wx_u8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_tumu( @@ -624,7 +624,7 @@ vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclipu_wv_u8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_tumu( @@ -633,7 +633,7 @@ vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_tumu( @@ -642,7 +642,7 @@ vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclipu_wv_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_tumu( @@ -651,7 +651,7 @@ vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_tumu( @@ -660,7 +660,7 @@ vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint1 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclipu_wv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_tumu( @@ -669,7 +669,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_tumu( @@ -678,7 +678,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclipu_wv_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_tumu( @@ -687,7 +687,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_tumu( @@ -696,7 +696,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - 
return vnclipu_wv_u16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_tumu( @@ -705,7 +705,7 @@ vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_tumu( @@ -714,7 +714,7 @@ vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclipu_wv_u16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_tumu( @@ -723,7 +723,7 @@ vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_tumu( @@ -732,7 +732,7 @@ vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclipu_wv_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_tumu( @@ 
-741,7 +741,7 @@ vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_tumu( @@ -750,7 +750,7 @@ vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclipu_wv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_tumu( @@ -759,7 +759,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_tumu( @@ -768,7 +768,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclipu_wv_u32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_tumu( @@ -777,7 +777,7 @@ vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vnclipu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_tumu( @@ -786,7 +786,7 @@ vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclipu_wv_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_tumu( @@ -795,7 +795,7 @@ vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_tumu( @@ -804,7 +804,7 @@ vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclipu_wv_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_tumu( @@ -813,7 +813,7 @@ vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnclipu_wx_u32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_mu( @@ -822,7 +822,7 @@ vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnclipu_wv_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_mu( @@ -831,7 +831,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_mu( @@ -840,7 +840,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnclipu_wv_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_mu( @@ -849,7 +849,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_mu( @@ -858,7 +858,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnclipu_wv_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_mu( @@ -867,7 +867,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_mu( @@ -876,7 +876,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnclipu_wv_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_mu( @@ -885,7 +885,7 @@ vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_mu( @@ -894,7 +894,7 @@ vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnclipu_wv_u8m2_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnclipu_wv_u8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_mu( @@ -903,7 +903,7 @@ vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_mu( @@ -912,7 +912,7 @@ vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnclipu_wv_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_mu( @@ -921,7 +921,7 @@ vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_mu( @@ -930,7 +930,7 @@ vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnclipu_wv_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_mu( @@ -939,7 +939,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_mu( @@ -948,7 +948,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnclipu_wv_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_mu( @@ -957,7 +957,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_mu( @@ -966,7 +966,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnclipu_wv_u16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_mu( @@ -975,7 +975,7 @@ vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m1_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnclipu_wx_u16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_mu( @@ -984,7 +984,7 @@ vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnclipu_wv_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_mu( @@ -993,7 +993,7 @@ vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_mu( @@ -1002,7 +1002,7 @@ vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnclipu_wv_u16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_mu( @@ -1011,7 +1011,7 @@ vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_mu( @@ -1020,7 +1020,7 @@ vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnclipu_wv_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_mu( @@ -1029,7 +1029,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_mu( @@ -1038,7 +1038,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnclipu_wv_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_mu( @@ -1047,7 +1047,7 @@ vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_mu( @@ -1056,7 +1056,7 @@ vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnclipu_wv_u32m2_mu(mask, maskedoff, 
op1, shift, vl); + return __riscv_vnclipu_wv_u32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_mu( @@ -1065,7 +1065,7 @@ vuint32m2_t test_vnclipu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_mu( @@ -1074,7 +1074,7 @@ vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnclipu_wv_u32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wv_u32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_mu( @@ -1083,6 +1083,6 @@ vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vnclipu_wx_u32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnclipu_wx_u32m4_mu(mask, maskedoff, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vncvt.c index 76c3e732d4a2..fa3be7ceff59 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vncvt.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vncvt_x_x_w_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) { - return 
vncvt_x_x_w_i8mf8_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8mf8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vncvt_x_x_w_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) { - return vncvt_x_x_w_i8mf4_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_tu( @@ -30,7 +30,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vncvt_x_x_w_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t src, size_t vl) { - return vncvt_x_x_w_i8mf2_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_tu( @@ -39,7 +39,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vncvt_x_x_w_i8m1_tu(vint8m1_t maskedoff, vint16m2_t src, size_t vl) { - return vncvt_x_x_w_i8m1_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_tu( @@ -48,7 +48,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1_tu(vint8m1_t maskedoff, vint16m2_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vncvt_x_x_w_i8m2_tu(vint8m2_t maskedoff, vint16m4_t src, size_t vl) { - return vncvt_x_x_w_i8m2_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_tu( @@ -57,7 +57,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2_tu(vint8m2_t maskedoff, vint16m4_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vncvt_x_x_w_i8m4_tu(vint8m4_t maskedoff, vint16m8_t src, size_t vl) { - return vncvt_x_x_w_i8m4_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8m4_tu(maskedoff, src, 
vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_tu( @@ -66,7 +66,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4_tu(vint8m4_t maskedoff, vint16m8_t src, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vncvt_x_x_w_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) { - return vncvt_x_x_w_u8mf8_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8mf8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_tu( @@ -75,7 +75,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vncvt_x_x_w_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) { - return vncvt_x_x_w_u8mf4_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_tu( @@ -84,7 +84,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vncvt_x_x_w_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) { - return vncvt_x_x_w_u8mf2_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_tu( @@ -93,7 +93,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vncvt_x_x_w_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) { - return vncvt_x_x_w_u8m1_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_tu( @@ -102,7 +102,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vncvt_x_x_w_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) { - return vncvt_x_x_w_u8m2_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_tu( @@ -111,7 +111,7 
@@ vuint8m2_t test_vncvt_x_x_w_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vncvt_x_x_w_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) { - return vncvt_x_x_w_u8m4_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return vncvt_x_x_w_i16mf4_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_tu( @@ -129,7 +129,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vncvt_x_x_w_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vncvt_x_x_w_i16mf2_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_tu( @@ -138,7 +138,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vncvt_x_x_w_i16m1_tu(vint16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vncvt_x_x_w_i16m1_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_tu( @@ -147,7 +147,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1_tu(vint16m1_t maskedoff, vint32m2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vncvt_x_x_w_i16m2_tu(vint16m2_t maskedoff, vint32m4_t src, size_t vl) { - return vncvt_x_x_w_i16m2_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_tu( @@ -156,7 +156,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2_tu(vint16m2_t 
maskedoff, vint32m4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vncvt_x_x_w_i16m4_tu(vint16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vncvt_x_x_w_i16m4_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tu( @@ -165,7 +165,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4_tu(vint16m4_t maskedoff, vint32m8_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vncvt_x_x_w_u16mf4_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_tu( @@ -174,7 +174,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vncvt_x_x_w_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return vncvt_x_x_w_u16mf2_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_tu( @@ -183,7 +183,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vncvt_x_x_w_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vncvt_x_x_w_u16m1_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_tu( @@ -192,7 +192,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vncvt_x_x_w_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return vncvt_x_x_w_u16m2_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_tu( @@ -201,7 +201,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t src, si // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vncvt_x_x_w_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return vncvt_x_x_w_u16m4_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_tu( @@ -210,7 +210,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vncvt_x_x_w_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vncvt_x_x_w_i32mf2_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_tu( @@ -219,7 +219,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vncvt_x_x_w_i32m1_tu(vint32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vncvt_x_x_w_i32m1_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_tu( @@ -228,7 +228,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1_tu(vint32m1_t maskedoff, vint64m2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vncvt_x_x_w_i32m2_tu(vint32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vncvt_x_x_w_i32m2_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_tu( @@ -237,7 +237,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2_tu(vint32m2_t maskedoff, vint64m4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vncvt_x_x_w_i32m4_tu(vint32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vncvt_x_x_w_i32m4_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_tu( @@ -246,7 +246,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4_tu(vint32m4_t maskedoff, vint64m8_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vncvt_x_x_w_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vncvt_x_x_w_u32mf2_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_tu( @@ -255,7 +255,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vncvt_x_x_w_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vncvt_x_x_w_u32m1_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_tu( @@ -264,7 +264,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vncvt_x_x_w_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vncvt_x_x_w_u32m2_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_tu( @@ -273,7 +273,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vncvt_x_x_w_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vncvt_x_x_w_u32m4_tu(maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_tum( @@ -282,7 +282,7 @@ vuint32m4_t test_vncvt_x_x_w_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vncvt_x_x_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) { - return vncvt_x_x_w_i8mf8_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8mf8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_tum( @@ -291,7 +291,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t 
test_vncvt_x_x_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) { - return vncvt_x_x_w_i8mf4_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_tum( @@ -300,7 +300,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vncvt_x_x_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) { - return vncvt_x_x_w_i8mf2_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_tum( @@ -309,7 +309,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vncvt_x_x_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) { - return vncvt_x_x_w_i8m1_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_tum( @@ -318,7 +318,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vncvt_x_x_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) { - return vncvt_x_x_w_i8m2_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_tum( @@ -327,7 +327,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vncvt_x_x_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) { - return vncvt_x_x_w_i8m4_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_tum( @@ -336,7 +336,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4_tum(vbool2_t mask, 
vint8m4_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vncvt_x_x_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) { - return vncvt_x_x_w_u8mf8_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8mf8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_tum( @@ -345,7 +345,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vncvt_x_x_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) { - return vncvt_x_x_w_u8mf4_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_tum( @@ -354,7 +354,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vncvt_x_x_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) { - return vncvt_x_x_w_u8mf2_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_tum( @@ -363,7 +363,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vncvt_x_x_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) { - return vncvt_x_x_w_u8m1_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_tum( @@ -372,7 +372,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vncvt_x_x_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) { - return vncvt_x_x_w_u8m2_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vncvt_x_x_w_u8m4_tum( @@ -381,7 +381,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vncvt_x_x_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) { - return vncvt_x_x_w_u8m4_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tum( @@ -390,7 +390,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vncvt_x_x_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return vncvt_x_x_w_i16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_tum( @@ -399,7 +399,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vncvt_x_x_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vncvt_x_x_w_i16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_tum( @@ -408,7 +408,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vncvt_x_x_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vncvt_x_x_w_i16m1_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_tum( @@ -417,7 +417,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vncvt_x_x_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) { - return vncvt_x_x_w_i16m2_tum(mask, maskedoff, 
src, vl); + return __riscv_vncvt_x_x_w_i16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_tum( @@ -426,7 +426,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vncvt_x_x_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vncvt_x_x_w_i16m4_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tum( @@ -435,7 +435,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vncvt_x_x_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vncvt_x_x_w_u16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_tum( @@ -444,7 +444,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vncvt_x_x_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return vncvt_x_x_w_u16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_tum( @@ -453,7 +453,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vncvt_x_x_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vncvt_x_x_w_u16m1_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_tum( @@ -462,7 +462,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vncvt_x_x_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return vncvt_x_x_w_u16m2_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_tum( @@ -471,7 +471,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vncvt_x_x_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return vncvt_x_x_w_u16m4_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_tum( @@ -480,7 +480,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vncvt_x_x_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vncvt_x_x_w_i32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_tum( @@ -489,7 +489,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vncvt_x_x_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vncvt_x_x_w_i32m1_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_tum( @@ -498,7 +498,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vncvt_x_x_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vncvt_x_x_w_i32m2_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_tum( @@ -507,7 +507,7 @@ vint32m2_t 
test_vncvt_x_x_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vncvt_x_x_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vncvt_x_x_w_i32m4_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_tum( @@ -516,7 +516,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vncvt_x_x_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vncvt_x_x_w_u32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_tum( @@ -525,7 +525,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vncvt_x_x_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vncvt_x_x_w_u32m1_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_tum( @@ -534,7 +534,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vncvt_x_x_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vncvt_x_x_w_u32m2_tum(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_tum( @@ -543,7 +543,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vncvt_x_x_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vncvt_x_x_w_u32m4_tum(mask, maskedoff, src, vl); + return 
__riscv_vncvt_x_x_w_u32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_tumu( @@ -552,7 +552,7 @@ vuint32m4_t test_vncvt_x_x_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vncvt_x_x_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) { - return vncvt_x_x_w_i8mf8_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8mf8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_tumu( @@ -561,7 +561,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vncvt_x_x_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) { - return vncvt_x_x_w_i8mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_tumu( @@ -570,7 +570,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vncvt_x_x_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) { - return vncvt_x_x_w_i8mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_tumu( @@ -579,7 +579,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vncvt_x_x_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) { - return vncvt_x_x_w_i8m1_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_tumu( @@ -588,7 +588,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vncvt_x_x_w_i8m2_tumu(vbool4_t mask, vint8m2_t 
maskedoff, vint16m4_t src, size_t vl) { - return vncvt_x_x_w_i8m2_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_tumu( @@ -597,7 +597,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vncvt_x_x_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) { - return vncvt_x_x_w_i8m4_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_tumu( @@ -606,7 +606,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vncvt_x_x_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) { - return vncvt_x_x_w_u8mf8_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8mf8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_tumu( @@ -615,7 +615,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vncvt_x_x_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) { - return vncvt_x_x_w_u8mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_tumu( @@ -624,7 +624,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vncvt_x_x_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) { - return vncvt_x_x_w_u8mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_tumu( @@ -633,7 +633,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t 
maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vncvt_x_x_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) { - return vncvt_x_x_w_u8m1_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_tumu( @@ -642,7 +642,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vncvt_x_x_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) { - return vncvt_x_x_w_u8m2_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_tumu( @@ -651,7 +651,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vncvt_x_x_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) { - return vncvt_x_x_w_u8m4_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tumu( @@ -660,7 +660,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return vncvt_x_x_w_i16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_tumu( @@ -669,7 +669,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vncvt_x_x_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vncvt_x_x_w_i16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vncvt_x_x_w_i16m1_tumu( @@ -678,7 +678,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vncvt_x_x_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vncvt_x_x_w_i16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_tumu( @@ -687,7 +687,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vncvt_x_x_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) { - return vncvt_x_x_w_i16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_tumu( @@ -696,7 +696,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vncvt_x_x_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vncvt_x_x_w_i16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tumu( @@ -705,7 +705,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vncvt_x_x_w_u16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_tumu( @@ -714,7 +714,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vncvt_x_x_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return 
vncvt_x_x_w_u16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_tumu( @@ -723,7 +723,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vncvt_x_x_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vncvt_x_x_w_u16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_tumu( @@ -732,7 +732,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vncvt_x_x_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return vncvt_x_x_w_u16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_tumu( @@ -741,7 +741,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vncvt_x_x_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return vncvt_x_x_w_u16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_tumu( @@ -750,7 +750,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vncvt_x_x_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vncvt_x_x_w_i32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_tumu( @@ -759,7 +759,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m1_t test_vncvt_x_x_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vncvt_x_x_w_i32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_tumu( @@ -768,7 +768,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vncvt_x_x_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vncvt_x_x_w_i32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_tumu( @@ -777,7 +777,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vncvt_x_x_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vncvt_x_x_w_i32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_tumu( @@ -786,7 +786,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vncvt_x_x_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vncvt_x_x_w_u32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_tumu( @@ -795,7 +795,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vncvt_x_x_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vncvt_x_x_w_u32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_tumu( @@ 
-804,7 +804,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vncvt_x_x_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vncvt_x_x_w_u32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_tumu( @@ -813,7 +813,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vncvt_x_x_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vncvt_x_x_w_u32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_mu( @@ -822,7 +822,7 @@ vuint32m4_t test_vncvt_x_x_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vncvt_x_x_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) { - return vncvt_x_x_w_i8mf8_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8mf8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_mu( @@ -831,7 +831,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vncvt_x_x_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) { - return vncvt_x_x_w_i8mf4_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_mu( @@ -840,7 +840,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vncvt_x_x_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) { - return vncvt_x_x_w_i8mf2_mu(mask, maskedoff, src, vl); + return 
__riscv_vncvt_x_x_w_i8mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_mu( @@ -849,7 +849,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vncvt_x_x_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) { - return vncvt_x_x_w_i8m1_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_mu( @@ -858,7 +858,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vncvt_x_x_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) { - return vncvt_x_x_w_i8m2_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_mu( @@ -867,7 +867,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vncvt_x_x_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) { - return vncvt_x_x_w_i8m4_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i8m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_mu( @@ -876,7 +876,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vncvt_x_x_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) { - return vncvt_x_x_w_u8mf8_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8mf8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_mu( @@ -885,7 +885,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vncvt_x_x_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) { - return 
vncvt_x_x_w_u8mf4_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_mu( @@ -894,7 +894,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vncvt_x_x_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) { - return vncvt_x_x_w_u8mf2_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_mu( @@ -903,7 +903,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vncvt_x_x_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) { - return vncvt_x_x_w_u8m1_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_mu( @@ -912,7 +912,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vncvt_x_x_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) { - return vncvt_x_x_w_u8m2_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_mu( @@ -921,7 +921,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vncvt_x_x_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) { - return vncvt_x_x_w_u8m4_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u8m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_mu( @@ -930,7 +930,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vncvt_x_x_w_i16mf4_mu(vbool64_t 
mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return vncvt_x_x_w_i16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_mu( @@ -939,7 +939,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vncvt_x_x_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return vncvt_x_x_w_i16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_mu( @@ -948,7 +948,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vncvt_x_x_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) { - return vncvt_x_x_w_i16m1_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_mu( @@ -957,7 +957,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vncvt_x_x_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) { - return vncvt_x_x_w_i16m2_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_mu( @@ -966,7 +966,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vncvt_x_x_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) { - return vncvt_x_x_w_i16m4_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_mu( @@ -975,7 +975,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vncvt_x_x_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return vncvt_x_x_w_u16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_mu( @@ -984,7 +984,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vncvt_x_x_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return vncvt_x_x_w_u16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_mu( @@ -993,7 +993,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vncvt_x_x_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return vncvt_x_x_w_u16m1_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_mu( @@ -1002,7 +1002,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vncvt_x_x_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return vncvt_x_x_w_u16m2_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_mu( @@ -1011,7 +1011,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vncvt_x_x_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return vncvt_x_x_w_u16m4_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_mu( @@ 
-1020,7 +1020,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vncvt_x_x_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vncvt_x_x_w_i32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_mu( @@ -1029,7 +1029,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vncvt_x_x_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vncvt_x_x_w_i32m1_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_mu( @@ -1038,7 +1038,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vncvt_x_x_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vncvt_x_x_w_i32m2_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_mu( @@ -1047,7 +1047,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vncvt_x_x_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vncvt_x_x_w_i32m4_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_i32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_mu( @@ -1056,7 +1056,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vncvt_x_x_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vncvt_x_x_w_u32mf2_mu(mask, maskedoff, src, vl); + return 
__riscv_vncvt_x_x_w_u32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_mu( @@ -1065,7 +1065,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vncvt_x_x_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vncvt_x_x_w_u32m1_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_mu( @@ -1074,7 +1074,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vncvt_x_x_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vncvt_x_x_w_u32m2_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_mu( @@ -1083,6 +1083,6 @@ vuint32m2_t test_vncvt_x_x_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vncvt_x_x_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vncvt_x_x_w_u32m4_mu(mask, maskedoff, src, vl); + return __riscv_vncvt_x_x_w_u32m4_mu(mask, maskedoff, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vneg.c index 89cca3bd7930..58557df51c54 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vneg.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vneg_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vneg_v_i8mf8_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i8mf8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf4_tu( @@ 
-22,7 +22,7 @@ vint8mf8_t test_vneg_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vneg_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vneg_v_i8mf4_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i8mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf2_tu( @@ -31,7 +31,7 @@ vint8mf4_t test_vneg_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vneg_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vneg_v_i8mf2_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i8mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m1_tu( @@ -40,7 +40,7 @@ vint8mf2_t test_vneg_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vneg_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vneg_v_i8m1_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i8m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m2_tu( @@ -49,7 +49,7 @@ vint8m1_t test_vneg_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vneg_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vneg_v_i8m2_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i8m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m4_tu( @@ -58,7 +58,7 @@ vint8m2_t test_vneg_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vneg_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vneg_v_i8m4_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i8m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m8_tu( @@ -67,7 +67,7 @@ vint8m4_t test_vneg_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vneg_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t 
vl) { - return vneg_v_i8m8_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i8m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf4_tu( @@ -76,7 +76,7 @@ vint8m8_t test_vneg_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vneg_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vneg_v_i16mf4_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i16mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf2_tu( @@ -85,7 +85,7 @@ vint16mf4_t test_vneg_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vneg_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vneg_v_i16mf2_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i16mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m1_tu( @@ -94,7 +94,7 @@ vint16mf2_t test_vneg_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vneg_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return vneg_v_i16m1_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i16m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m2_tu( @@ -103,7 +103,7 @@ vint16m1_t test_vneg_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vneg_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vneg_v_i16m2_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i16m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m4_tu( @@ -112,7 +112,7 @@ vint16m2_t test_vneg_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vneg_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vneg_v_i16m4_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i16m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m8_tu( @@ -121,7 
+121,7 @@ vint16m4_t test_vneg_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vneg_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vneg_v_i16m8_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i16m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tu( @@ -130,7 +130,7 @@ vint16m8_t test_vneg_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return vneg_v_i32mf2_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m1_tu( @@ -139,7 +139,7 @@ vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vneg_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vneg_v_i32m1_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i32m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m2_tu( @@ -148,7 +148,7 @@ vint32m1_t test_vneg_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vneg_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vneg_v_i32m2_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m4_tu( @@ -157,7 +157,7 @@ vint32m2_t test_vneg_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vneg_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vneg_v_i32m4_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m8_tu( @@ -166,7 +166,7 @@ vint32m4_t test_vneg_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vneg_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return vneg_v_i32m8_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m1_tu( @@ -175,7 +175,7 @@ vint32m8_t test_vneg_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vneg_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vneg_v_i64m1_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m2_tu( @@ -184,7 +184,7 @@ vint64m1_t test_vneg_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vneg_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vneg_v_i64m2_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m4_tu( @@ -193,7 +193,7 @@ vint64m2_t test_vneg_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vneg_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vneg_v_i64m4_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m8_tu( @@ -202,7 +202,7 @@ vint64m4_t test_vneg_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vneg_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return vneg_v_i64m8_tu(maskedoff, op1, vl); + return __riscv_vneg_v_i64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf8_tum( @@ -211,7 +211,7 @@ vint64m8_t test_vneg_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vneg_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vneg_v_i8mf8_tum(mask, maskedoff, op1, vl); + return 
__riscv_vneg_v_i8mf8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf4_tum( @@ -220,7 +220,7 @@ vint8mf8_t test_vneg_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vneg_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vneg_v_i8mf4_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf2_tum( @@ -229,7 +229,7 @@ vint8mf4_t test_vneg_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vneg_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vneg_v_i8mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m1_tum( @@ -238,7 +238,7 @@ vint8mf2_t test_vneg_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vneg_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vneg_v_i8m1_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m2_tum( @@ -247,7 +247,7 @@ vint8m1_t test_vneg_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vneg_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vneg_v_i8m2_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m4_tum( @@ -256,7 +256,7 @@ vint8m2_t test_vneg_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vneg_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vneg_v_i8m4_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8m4_tum(mask, 
maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m8_tum( @@ -265,7 +265,7 @@ vint8m4_t test_vneg_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vneg_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return vneg_v_i8m8_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf4_tum( @@ -274,7 +274,7 @@ vint8m8_t test_vneg_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vneg_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vneg_v_i16mf4_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf2_tum( @@ -283,7 +283,7 @@ vint16mf4_t test_vneg_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vneg_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vneg_v_i16mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m1_tum( @@ -292,7 +292,7 @@ vint16mf2_t test_vneg_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vneg_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return vneg_v_i16m1_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m2_tum( @@ -301,7 +301,7 @@ vint16m1_t test_vneg_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vneg_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vneg_v_i16m2_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16m2_tum(mask, 
maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m4_tum( @@ -310,7 +310,7 @@ vint16m2_t test_vneg_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vneg_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vneg_v_i16m4_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m8_tum( @@ -319,7 +319,7 @@ vint16m4_t test_vneg_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vneg_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vneg_v_i16m8_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tum( @@ -328,7 +328,7 @@ vint16m8_t test_vneg_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vneg_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return vneg_v_i32mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m1_tum( @@ -337,7 +337,7 @@ vint32mf2_t test_vneg_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vneg_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vneg_v_i32m1_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m2_tum( @@ -346,7 +346,7 @@ vint32m1_t test_vneg_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vneg_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vneg_v_i32m2_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32m2_tum(mask, 
maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m4_tum( @@ -355,7 +355,7 @@ vint32m2_t test_vneg_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vneg_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vneg_v_i32m4_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m8_tum( @@ -364,7 +364,7 @@ vint32m4_t test_vneg_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vneg_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return vneg_v_i32m8_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m1_tum( @@ -373,7 +373,7 @@ vint32m8_t test_vneg_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vneg_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vneg_v_i64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m2_tum( @@ -382,7 +382,7 @@ vint64m1_t test_vneg_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vneg_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vneg_v_i64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m4_tum( @@ -391,7 +391,7 @@ vint64m2_t test_vneg_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vneg_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vneg_v_i64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i64m4_tum(mask, maskedoff, 
op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m8_tum( @@ -400,7 +400,7 @@ vint64m4_t test_vneg_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vneg_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return vneg_v_i64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf8_tumu( @@ -409,7 +409,7 @@ vint64m8_t test_vneg_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vneg_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vneg_v_i8mf8_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8mf8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf4_tumu( @@ -418,7 +418,7 @@ vint8mf8_t test_vneg_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vneg_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vneg_v_i8mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf2_tumu( @@ -427,7 +427,7 @@ vint8mf4_t test_vneg_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vneg_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vneg_v_i8mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m1_tumu( @@ -436,7 +436,7 @@ vint8mf2_t test_vneg_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vneg_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vneg_v_i8m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8m1_tumu(mask, maskedoff, 
op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m2_tumu( @@ -445,7 +445,7 @@ vint8m1_t test_vneg_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vneg_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vneg_v_i8m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m4_tumu( @@ -454,7 +454,7 @@ vint8m2_t test_vneg_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vneg_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vneg_v_i8m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m8_tumu( @@ -463,7 +463,7 @@ vint8m4_t test_vneg_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vneg_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return vneg_v_i8m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf4_tumu( @@ -472,7 +472,7 @@ vint8m8_t test_vneg_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vneg_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vneg_v_i16mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf2_tumu( @@ -481,7 +481,7 @@ vint16mf4_t test_vneg_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vneg_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vneg_v_i16mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16mf2_tumu(mask, maskedoff, 
op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m1_tumu( @@ -490,7 +490,7 @@ vint16mf2_t test_vneg_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vneg_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return vneg_v_i16m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m2_tumu( @@ -499,7 +499,7 @@ vint16m1_t test_vneg_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vneg_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vneg_v_i16m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m4_tumu( @@ -508,7 +508,7 @@ vint16m2_t test_vneg_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vneg_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vneg_v_i16m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m8_tumu( @@ -517,7 +517,7 @@ vint16m4_t test_vneg_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vneg_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vneg_v_i16m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tumu( @@ -526,7 +526,7 @@ vint16m8_t test_vneg_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return vneg_v_i32mf2_tumu(mask, maskedoff, op1, vl); + return 
__riscv_vneg_v_i32mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m1_tumu( @@ -535,7 +535,7 @@ vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vneg_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vneg_v_i32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m2_tumu( @@ -544,7 +544,7 @@ vint32m1_t test_vneg_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vneg_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vneg_v_i32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m4_tumu( @@ -553,7 +553,7 @@ vint32m2_t test_vneg_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vneg_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vneg_v_i32m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m8_tumu( @@ -562,7 +562,7 @@ vint32m4_t test_vneg_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vneg_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return vneg_v_i32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m1_tumu( @@ -571,7 +571,7 @@ vint32m8_t test_vneg_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vneg_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vneg_v_i64m1_tumu(mask, maskedoff, op1, vl); 
+ return __riscv_vneg_v_i64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m2_tumu( @@ -580,7 +580,7 @@ vint64m1_t test_vneg_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vneg_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vneg_v_i64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m4_tumu( @@ -589,7 +589,7 @@ vint64m2_t test_vneg_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vneg_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vneg_v_i64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m8_tumu( @@ -598,7 +598,7 @@ vint64m4_t test_vneg_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vneg_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return vneg_v_i64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf8_mu( @@ -607,7 +607,7 @@ vint64m8_t test_vneg_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vneg_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vneg_v_i8mf8_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8mf8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf4_mu( @@ -616,7 +616,7 @@ vint8mf8_t test_vneg_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vneg_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vneg_v_i8mf4_mu(mask, maskedoff, op1, vl); + 
return __riscv_vneg_v_i8mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8mf2_mu( @@ -625,7 +625,7 @@ vint8mf4_t test_vneg_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vneg_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vneg_v_i8mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m1_mu( @@ -634,7 +634,7 @@ vint8mf2_t test_vneg_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vneg_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vneg_v_i8m1_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m2_mu( @@ -643,7 +643,7 @@ vint8m1_t test_vneg_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vneg_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vneg_v_i8m2_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m4_mu( @@ -652,7 +652,7 @@ vint8m2_t test_vneg_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vneg_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vneg_v_i8m4_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i8m8_mu( @@ -661,7 +661,7 @@ vint8m4_t test_vneg_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vneg_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return vneg_v_i8m8_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i8m8_mu(mask, maskedoff, op1, vl); } 
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4_mu( @@ -670,7 +670,7 @@ vint8m8_t test_vneg_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vneg_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vneg_v_i16mf4_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16mf2_mu( @@ -679,7 +679,7 @@ vint16mf4_t test_vneg_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vneg_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vneg_v_i16mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m1_mu( @@ -688,7 +688,7 @@ vint16mf2_t test_vneg_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vneg_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return vneg_v_i16m1_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m2_mu( @@ -697,7 +697,7 @@ vint16m1_t test_vneg_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vneg_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vneg_v_i16m2_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i16m4_mu( @@ -706,7 +706,7 @@ vint16m2_t test_vneg_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vneg_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vneg_v_i16m4_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16m4_mu(mask, maskedoff, op1, vl); } // 
CHECK-RV64-LABEL: @test_vneg_v_i16m8_mu( @@ -715,7 +715,7 @@ vint16m4_t test_vneg_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vneg_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vneg_v_i16m8_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i16m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32mf2_mu( @@ -724,7 +724,7 @@ vint16m8_t test_vneg_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vneg_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return vneg_v_i32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m1_mu( @@ -733,7 +733,7 @@ vint32mf2_t test_vneg_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vneg_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vneg_v_i32m1_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m2_mu( @@ -742,7 +742,7 @@ vint32m1_t test_vneg_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vneg_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vneg_v_i32m2_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i32m4_mu( @@ -751,7 +751,7 @@ vint32m2_t test_vneg_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vneg_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vneg_v_i32m4_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: 
@test_vneg_v_i32m8_mu( @@ -760,7 +760,7 @@ vint32m4_t test_vneg_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vneg_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return vneg_v_i32m8_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m1_mu( @@ -769,7 +769,7 @@ vint32m8_t test_vneg_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vneg_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vneg_v_i64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m2_mu( @@ -778,7 +778,7 @@ vint64m1_t test_vneg_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vneg_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vneg_v_i64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m4_mu( @@ -787,7 +787,7 @@ vint64m2_t test_vneg_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vneg_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vneg_v_i64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vneg_v_i64m8_mu( @@ -796,6 +796,6 @@ vint64m4_t test_vneg_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vneg_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return vneg_v_i64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vneg_v_i64m8_mu(mask, maskedoff, op1, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsac.c index fc2bba85817d..6037878f3e39 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsac.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vnmsac_vv_i8mf8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_tu( @@ -22,7 +22,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vnmsac_vx_i8mf8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_tu( @@ -31,7 +31,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vnmsac_vv_i8mf4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_tu( @@ -40,7 +40,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vnmsac_vx_i8mf4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_tu( @@ -49,7 +49,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vnmsac_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vnmsac_vv_i8mf2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_tu( @@ -58,7 +58,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vnmsac_vx_i8mf2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_tu( @@ -67,7 +67,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vnmsac_vv_i8m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_tu( @@ -76,7 +76,7 @@ vint8m1_t test_vnmsac_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vnmsac_vx_i8m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_tu( @@ -85,7 +85,7 @@ vint8m1_t test_vnmsac_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vnmsac_vv_i8m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_tu( @@ -94,7 +94,7 @@ vint8m2_t test_vnmsac_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vnmsac_vx_i8m2_tu(vd, rs1, vs2, vl); + 
return __riscv_vnmsac_vx_i8m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_tu( @@ -103,7 +103,7 @@ vint8m2_t test_vnmsac_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vnmsac_vv_i8m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_tu( @@ -112,7 +112,7 @@ vint8m4_t test_vnmsac_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vnmsac_vx_i8m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_tu( @@ -121,7 +121,7 @@ vint8m4_t test_vnmsac_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vnmsac_vv_i8m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_tu( @@ -130,7 +130,7 @@ vint8m8_t test_vnmsac_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vnmsac_vx_i8m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_tu( @@ -139,7 +139,7 @@ vint8m8_t test_vnmsac_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vnmsac_vv_i16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_tu( @@ -148,7 
+148,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vnmsac_vx_i16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_tu( @@ -157,7 +157,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vnmsac_vv_i16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_tu( @@ -166,7 +166,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vnmsac_vx_i16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_tu( @@ -175,7 +175,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vnmsac_vv_i16m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_tu( @@ -184,7 +184,7 @@ vint16m1_t test_vnmsac_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vnmsac_vx_i16m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_tu( @@ -193,7 +193,7 @@ vint16m1_t test_vnmsac_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, 
vint16m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vnmsac_vv_i16m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_tu( @@ -202,7 +202,7 @@ vint16m2_t test_vnmsac_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vnmsac_vx_i16m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_tu( @@ -211,7 +211,7 @@ vint16m2_t test_vnmsac_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vnmsac_vv_i16m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_tu( @@ -220,7 +220,7 @@ vint16m4_t test_vnmsac_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vnmsac_vx_i16m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_tu( @@ -229,7 +229,7 @@ vint16m4_t test_vnmsac_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vnmsac_vv_i16m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_tu( @@ -238,7 +238,7 @@ vint16m8_t test_vnmsac_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vnmsac_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vnmsac_vx_i16m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tu( @@ -247,7 +247,7 @@ vint16m8_t test_vnmsac_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vnmsac_vv_i32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tu( @@ -256,7 +256,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vnmsac_vx_i32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_tu( @@ -265,7 +265,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vnmsac_vv_i32m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_tu( @@ -274,7 +274,7 @@ vint32m1_t test_vnmsac_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vnmsac_vx_i32m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_tu( @@ -283,7 +283,7 @@ vint32m1_t test_vnmsac_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t 
vl) { - return vnmsac_vv_i32m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_tu( @@ -292,7 +292,7 @@ vint32m2_t test_vnmsac_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vnmsac_vx_i32m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_tu( @@ -301,7 +301,7 @@ vint32m2_t test_vnmsac_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vnmsac_vv_i32m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_tu( @@ -310,7 +310,7 @@ vint32m4_t test_vnmsac_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vnmsac_vx_i32m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_tu( @@ -319,7 +319,7 @@ vint32m4_t test_vnmsac_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vnmsac_vv_i32m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_tu( @@ -328,7 +328,7 @@ vint32m8_t test_vnmsac_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vnmsac_vx_i32m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m8_tu(vd, 
rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_tu( @@ -337,7 +337,7 @@ vint32m8_t test_vnmsac_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vnmsac_vv_i64m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_tu( @@ -346,7 +346,7 @@ vint64m1_t test_vnmsac_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vnmsac_vx_i64m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_tu( @@ -355,7 +355,7 @@ vint64m1_t test_vnmsac_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vnmsac_vv_i64m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_tu( @@ -364,7 +364,7 @@ vint64m2_t test_vnmsac_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vnmsac_vx_i64m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_tu( @@ -373,7 +373,7 @@ vint64m2_t test_vnmsac_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vnmsac_vv_i64m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_tu( @@ -382,7 +382,7 @@ 
vint64m4_t test_vnmsac_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vnmsac_vx_i64m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_tu( @@ -391,7 +391,7 @@ vint64m4_t test_vnmsac_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vnmsac_vv_i64m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_tu( @@ -400,7 +400,7 @@ vint64m8_t test_vnmsac_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vnmsac_vx_i64m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_tu( @@ -409,7 +409,7 @@ vint64m8_t test_vnmsac_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vnmsac_vv_u8mf8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_tu( @@ -418,7 +418,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vnmsac_vx_u8mf8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_tu( @@ -427,7 +427,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vnmsac_vv_u8mf4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_tu( @@ -436,7 +436,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vnmsac_vx_u8mf4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_tu( @@ -445,7 +445,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vnmsac_vv_u8mf2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_tu( @@ -454,7 +454,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vnmsac_vx_u8mf2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_tu( @@ -463,7 +463,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vnmsac_vv_u8m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_tu( @@ -472,7 +472,7 @@ vuint8m1_t test_vnmsac_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_tu(vuint8m1_t vd, 
uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vnmsac_vx_u8m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_tu( @@ -481,7 +481,7 @@ vuint8m1_t test_vnmsac_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vnmsac_vv_u8m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_tu( @@ -490,7 +490,7 @@ vuint8m2_t test_vnmsac_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vnmsac_vx_u8m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_tu( @@ -499,7 +499,7 @@ vuint8m2_t test_vnmsac_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vnmsac_vv_u8m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_tu( @@ -508,7 +508,7 @@ vuint8m4_t test_vnmsac_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vnmsac_vx_u8m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_tu( @@ -517,7 +517,7 @@ vuint8m4_t test_vnmsac_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vnmsac_vv_u8m8_tu(vd, vs1, vs2, vl); + return 
__riscv_vnmsac_vv_u8m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_tu( @@ -526,7 +526,7 @@ vuint8m8_t test_vnmsac_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vnmsac_vx_u8m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_tu( @@ -535,7 +535,7 @@ vuint8m8_t test_vnmsac_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vnmsac_vv_u16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_tu( @@ -544,7 +544,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vnmsac_vx_u16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_tu( @@ -553,7 +553,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vnmsac_vv_u16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_tu( @@ -562,7 +562,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vnmsac_vx_u16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16mf2_tu(vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_tu( @@ -571,7 +571,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vnmsac_vv_u16m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_tu( @@ -580,7 +580,7 @@ vuint16m1_t test_vnmsac_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vnmsac_vx_u16m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_tu( @@ -589,7 +589,7 @@ vuint16m1_t test_vnmsac_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vnmsac_vv_u16m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_tu( @@ -598,7 +598,7 @@ vuint16m2_t test_vnmsac_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vnmsac_vx_u16m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_tu( @@ -607,7 +607,7 @@ vuint16m2_t test_vnmsac_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vnmsac_vv_u16m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_tu( @@ -616,7 +616,7 @@ 
vuint16m4_t test_vnmsac_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vnmsac_vx_u16m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_tu( @@ -625,7 +625,7 @@ vuint16m4_t test_vnmsac_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vnmsac_vv_u16m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_tu( @@ -634,7 +634,7 @@ vuint16m8_t test_vnmsac_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vnmsac_vx_u16m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tu( @@ -643,7 +643,7 @@ vuint16m8_t test_vnmsac_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vnmsac_vv_u32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tu( @@ -652,7 +652,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vnmsac_vx_u32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_tu( @@ -661,7 +661,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_tu(vuint32mf2_t vd, 
uint32_t rs1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vnmsac_vv_u32m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_tu( @@ -670,7 +670,7 @@ vuint32m1_t test_vnmsac_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vnmsac_vx_u32m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_tu( @@ -679,7 +679,7 @@ vuint32m1_t test_vnmsac_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vnmsac_vv_u32m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_tu( @@ -688,7 +688,7 @@ vuint32m2_t test_vnmsac_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vnmsac_vx_u32m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_tu( @@ -697,7 +697,7 @@ vuint32m2_t test_vnmsac_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vnmsac_vv_u32m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_tu( @@ -706,7 +706,7 @@ vuint32m4_t test_vnmsac_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m4_t test_vnmsac_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vnmsac_vx_u32m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_tu( @@ -715,7 +715,7 @@ vuint32m4_t test_vnmsac_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vnmsac_vv_u32m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_tu( @@ -724,7 +724,7 @@ vuint32m8_t test_vnmsac_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vnmsac_vx_u32m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_tu( @@ -733,7 +733,7 @@ vuint32m8_t test_vnmsac_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vnmsac_vv_u64m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_tu( @@ -742,7 +742,7 @@ vuint64m1_t test_vnmsac_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vnmsac_vx_u64m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_tu( @@ -751,7 +751,7 @@ vuint64m1_t test_vnmsac_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, 
vuint64m2_t vs2, size_t vl) { - return vnmsac_vv_u64m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_tu( @@ -760,7 +760,7 @@ vuint64m2_t test_vnmsac_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vnmsac_vx_u64m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_tu( @@ -769,7 +769,7 @@ vuint64m2_t test_vnmsac_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vnmsac_vv_u64m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_tu( @@ -778,7 +778,7 @@ vuint64m4_t test_vnmsac_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vnmsac_vx_u64m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_tu( @@ -787,7 +787,7 @@ vuint64m4_t test_vnmsac_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vnmsac_vv_u64m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_tu( @@ -796,7 +796,7 @@ vuint64m8_t test_vnmsac_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vnmsac_vx_u64m8_tu(vd, rs1, vs2, 
vl); + return __riscv_vnmsac_vx_u64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_tum( @@ -805,7 +805,7 @@ vuint64m8_t test_vnmsac_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vnmsac_vv_i8mf8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_tum( @@ -814,7 +814,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vnmsac_vx_i8mf8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_tum( @@ -823,7 +823,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vnmsac_vv_i8mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_tum( @@ -832,7 +832,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vnmsac_vx_i8mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_tum( @@ -841,7 +841,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, 
size_t vl) { - return vnmsac_vv_i8mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_tum( @@ -850,7 +850,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vnmsac_vx_i8mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_tum( @@ -859,7 +859,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vnmsac_vv_i8m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_tum( @@ -868,7 +868,7 @@ vint8m1_t test_vnmsac_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vnmsac_vx_i8m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_tum( @@ -877,7 +877,7 @@ vint8m1_t test_vnmsac_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vnmsac_vv_i8m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_tum( @@ -886,7 +886,7 @@ vint8m2_t test_vnmsac_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_tum(vbool4_t mask, vint8m2_t 
vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vnmsac_vx_i8m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_tum( @@ -895,7 +895,7 @@ vint8m2_t test_vnmsac_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vnmsac_vv_i8m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_tum( @@ -904,7 +904,7 @@ vint8m4_t test_vnmsac_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vnmsac_vx_i8m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_tum( @@ -913,7 +913,7 @@ vint8m4_t test_vnmsac_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vnmsac_vv_i8m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_tum( @@ -922,7 +922,7 @@ vint8m8_t test_vnmsac_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vnmsac_vx_i8m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_tum( @@ -931,7 +931,7 @@ vint8m8_t test_vnmsac_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vnmsac_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vnmsac_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_tum( @@ -940,7 +940,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vnmsac_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_tum( @@ -949,7 +949,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vnmsac_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_tum( @@ -958,7 +958,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vnmsac_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_tum( @@ -967,7 +967,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vnmsac_vv_i16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_tum( @@ -976,7 +976,7 @@ vint16m1_t 
test_vnmsac_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vnmsac_vx_i16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_tum( @@ -985,7 +985,7 @@ vint16m1_t test_vnmsac_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vnmsac_vv_i16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_tum( @@ -994,7 +994,7 @@ vint16m2_t test_vnmsac_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vnmsac_vx_i16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_tum( @@ -1003,7 +1003,7 @@ vint16m2_t test_vnmsac_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vnmsac_vv_i16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_tum( @@ -1012,7 +1012,7 @@ vint16m4_t test_vnmsac_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vnmsac_vx_i16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m4_tum(mask, vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_tum( @@ -1021,7 +1021,7 @@ vint16m4_t test_vnmsac_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vnmsac_vv_i16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_tum( @@ -1030,7 +1030,7 @@ vint16m8_t test_vnmsac_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vnmsac_vx_i16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tum( @@ -1039,7 +1039,7 @@ vint16m8_t test_vnmsac_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vnmsac_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tum( @@ -1048,7 +1048,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vnmsac_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_tum( @@ -1057,7 +1057,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return 
vnmsac_vv_i32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_tum( @@ -1066,7 +1066,7 @@ vint32m1_t test_vnmsac_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vnmsac_vx_i32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_tum( @@ -1075,7 +1075,7 @@ vint32m1_t test_vnmsac_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vnmsac_vv_i32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_tum( @@ -1084,7 +1084,7 @@ vint32m2_t test_vnmsac_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vnmsac_vx_i32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_tum( @@ -1093,7 +1093,7 @@ vint32m2_t test_vnmsac_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vnmsac_vv_i32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_tum( @@ -1102,7 +1102,7 @@ vint32m4_t test_vnmsac_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_tum(vbool8_t 
mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vnmsac_vx_i32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_tum( @@ -1111,7 +1111,7 @@ vint32m4_t test_vnmsac_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vnmsac_vv_i32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_tum( @@ -1120,7 +1120,7 @@ vint32m8_t test_vnmsac_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vnmsac_vx_i32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_tum( @@ -1129,7 +1129,7 @@ vint32m8_t test_vnmsac_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vnmsac_vv_i64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_tum( @@ -1138,7 +1138,7 @@ vint64m1_t test_vnmsac_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vnmsac_vx_i64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_tum( @@ -1147,7 +1147,7 @@ vint64m1_t test_vnmsac_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vnmsac_vv_i64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_tum( @@ -1156,7 +1156,7 @@ vint64m2_t test_vnmsac_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vnmsac_vx_i64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_tum( @@ -1165,7 +1165,7 @@ vint64m2_t test_vnmsac_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vnmsac_vv_i64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_tum( @@ -1174,7 +1174,7 @@ vint64m4_t test_vnmsac_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vnmsac_vx_i64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_tum( @@ -1183,7 +1183,7 @@ vint64m4_t test_vnmsac_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vnmsac_vv_i64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_tum( @@ -1192,7 +1192,7 @@ 
vint64m8_t test_vnmsac_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vnmsac_vx_i64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_tum( @@ -1201,7 +1201,7 @@ vint64m8_t test_vnmsac_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vnmsac_vv_u8mf8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_tum( @@ -1210,7 +1210,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vnmsac_vx_u8mf8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_tum( @@ -1219,7 +1219,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vnmsac_vv_u8mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_tum( @@ -1228,7 +1228,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vnmsac_vx_u8mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf4_tum(mask, 
vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_tum( @@ -1237,7 +1237,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vnmsac_vv_u8mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_tum( @@ -1246,7 +1246,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vnmsac_vx_u8mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_tum( @@ -1255,7 +1255,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vnmsac_vv_u8m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_tum( @@ -1264,7 +1264,7 @@ vuint8m1_t test_vnmsac_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vnmsac_vx_u8m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_tum( @@ -1273,7 +1273,7 @@ vuint8m1_t test_vnmsac_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return 
vnmsac_vv_u8m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_tum( @@ -1282,7 +1282,7 @@ vuint8m2_t test_vnmsac_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vnmsac_vx_u8m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_tum( @@ -1291,7 +1291,7 @@ vuint8m2_t test_vnmsac_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vnmsac_vv_u8m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_tum( @@ -1300,7 +1300,7 @@ vuint8m4_t test_vnmsac_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vnmsac_vx_u8m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_tum( @@ -1309,7 +1309,7 @@ vuint8m4_t test_vnmsac_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vnmsac_vv_u8m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_tum( @@ -1318,7 +1318,7 @@ vuint8m8_t test_vnmsac_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, 
uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vnmsac_vx_u8m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_tum( @@ -1327,7 +1327,7 @@ vuint8m8_t test_vnmsac_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vnmsac_vv_u16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_tum( @@ -1336,7 +1336,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vnmsac_vx_u16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_tum( @@ -1345,7 +1345,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vnmsac_vv_u16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_tum( @@ -1354,7 +1354,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vnmsac_vx_u16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_tum( @@ -1363,7 +1363,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_tum(vbool32_t mask, 
vuint16mf2_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vnmsac_vv_u16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_tum( @@ -1372,7 +1372,7 @@ vuint16m1_t test_vnmsac_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vnmsac_vx_u16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_tum( @@ -1381,7 +1381,7 @@ vuint16m1_t test_vnmsac_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vnmsac_vv_u16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_tum( @@ -1390,7 +1390,7 @@ vuint16m2_t test_vnmsac_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vnmsac_vx_u16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_tum( @@ -1399,7 +1399,7 @@ vuint16m2_t test_vnmsac_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vnmsac_vv_u16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vnmsac_vx_u16m4_tum( @@ -1408,7 +1408,7 @@ vuint16m4_t test_vnmsac_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vnmsac_vx_u16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_tum( @@ -1417,7 +1417,7 @@ vuint16m4_t test_vnmsac_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vnmsac_vv_u16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_tum( @@ -1426,7 +1426,7 @@ vuint16m8_t test_vnmsac_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vnmsac_vx_u16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tum( @@ -1435,7 +1435,7 @@ vuint16m8_t test_vnmsac_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vnmsac_vv_u32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tum( @@ -1444,7 +1444,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return 
vnmsac_vx_u32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_tum( @@ -1453,7 +1453,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vnmsac_vv_u32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_tum( @@ -1462,7 +1462,7 @@ vuint32m1_t test_vnmsac_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vnmsac_vx_u32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_tum( @@ -1471,7 +1471,7 @@ vuint32m1_t test_vnmsac_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vnmsac_vv_u32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_tum( @@ -1480,7 +1480,7 @@ vuint32m2_t test_vnmsac_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vnmsac_vx_u32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_tum( @@ -1489,7 +1489,7 @@ vuint32m2_t test_vnmsac_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vnmsac_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vnmsac_vv_u32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_tum( @@ -1498,7 +1498,7 @@ vuint32m4_t test_vnmsac_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vnmsac_vx_u32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_tum( @@ -1507,7 +1507,7 @@ vuint32m4_t test_vnmsac_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vnmsac_vv_u32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_tum( @@ -1516,7 +1516,7 @@ vuint32m8_t test_vnmsac_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vnmsac_vx_u32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_tum( @@ -1525,7 +1525,7 @@ vuint32m8_t test_vnmsac_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vnmsac_vv_u64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_tum( @@ -1534,7 +1534,7 @@ vuint64m1_t 
test_vnmsac_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vnmsac_vx_u64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_tum( @@ -1543,7 +1543,7 @@ vuint64m1_t test_vnmsac_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vnmsac_vv_u64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_tum( @@ -1552,7 +1552,7 @@ vuint64m2_t test_vnmsac_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vnmsac_vx_u64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_tum( @@ -1561,7 +1561,7 @@ vuint64m2_t test_vnmsac_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vnmsac_vv_u64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_tum( @@ -1570,7 +1570,7 @@ vuint64m4_t test_vnmsac_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vnmsac_vx_u64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m4_tum(mask, vd, 
rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_tum( @@ -1579,7 +1579,7 @@ vuint64m4_t test_vnmsac_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vnmsac_vv_u64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_tum( @@ -1588,7 +1588,7 @@ vuint64m8_t test_vnmsac_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vnmsac_vx_u64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_tumu( @@ -1597,7 +1597,7 @@ vuint64m8_t test_vnmsac_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vnmsac_vv_i8mf8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_tumu( @@ -1606,7 +1606,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vnmsac_vx_i8mf8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_tumu( @@ -1615,7 +1615,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return 
vnmsac_vv_i8mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_tumu( @@ -1624,7 +1624,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vnmsac_vx_i8mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_tumu( @@ -1633,7 +1633,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vnmsac_vv_i8mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_tumu( @@ -1642,7 +1642,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vnmsac_vx_i8mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_tumu( @@ -1651,7 +1651,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vnmsac_vv_i8m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_tumu( @@ -1660,7 +1660,7 @@ vint8m1_t test_vnmsac_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vnmsac_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vnmsac_vx_i8m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_tumu( @@ -1669,7 +1669,7 @@ vint8m1_t test_vnmsac_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vnmsac_vv_i8m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_tumu( @@ -1678,7 +1678,7 @@ vint8m2_t test_vnmsac_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vnmsac_vx_i8m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_tumu( @@ -1687,7 +1687,7 @@ vint8m2_t test_vnmsac_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vnmsac_vv_i8m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_tumu( @@ -1696,7 +1696,7 @@ vint8m4_t test_vnmsac_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vnmsac_vx_i8m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_tumu( @@ -1705,7 +1705,7 @@ vint8m4_t test_vnmsac_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, 
vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vnmsac_vv_i8m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_tumu( @@ -1714,7 +1714,7 @@ vint8m8_t test_vnmsac_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vnmsac_vx_i8m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_tumu( @@ -1723,7 +1723,7 @@ vint8m8_t test_vnmsac_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vnmsac_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_tumu( @@ -1732,7 +1732,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vnmsac_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_tumu( @@ -1741,7 +1741,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vnmsac_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vnmsac_vx_i16mf2_tumu( @@ -1750,7 +1750,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vnmsac_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_tumu( @@ -1759,7 +1759,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vnmsac_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_tumu( @@ -1768,7 +1768,7 @@ vint16m1_t test_vnmsac_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vnmsac_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_tumu( @@ -1777,7 +1777,7 @@ vint16m1_t test_vnmsac_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vnmsac_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_tumu( @@ -1786,7 +1786,7 @@ vint16m2_t test_vnmsac_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vnmsac_vx_i16m2_tumu(mask, vd, 
rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_tumu( @@ -1795,7 +1795,7 @@ vint16m2_t test_vnmsac_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vnmsac_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_tumu( @@ -1804,7 +1804,7 @@ vint16m4_t test_vnmsac_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vnmsac_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_tumu( @@ -1813,7 +1813,7 @@ vint16m4_t test_vnmsac_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vnmsac_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_tumu( @@ -1822,7 +1822,7 @@ vint16m8_t test_vnmsac_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vnmsac_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_tumu( @@ -1831,7 +1831,7 @@ vint16m8_t test_vnmsac_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_tumu(vbool64_t mask, 
vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vnmsac_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_tumu( @@ -1840,7 +1840,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vnmsac_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_tumu( @@ -1849,7 +1849,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vnmsac_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_tumu( @@ -1858,7 +1858,7 @@ vint32m1_t test_vnmsac_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vnmsac_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_tumu( @@ -1867,7 +1867,7 @@ vint32m1_t test_vnmsac_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vnmsac_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_tumu( @@ -1876,7 +1876,7 @@ vint32m2_t test_vnmsac_vv_i32m2_tumu(vbool16_t mask, 
vint32m2_t vd, vint32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vnmsac_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_tumu( @@ -1885,7 +1885,7 @@ vint32m2_t test_vnmsac_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vnmsac_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_tumu( @@ -1894,7 +1894,7 @@ vint32m4_t test_vnmsac_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vnmsac_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_tumu( @@ -1903,7 +1903,7 @@ vint32m4_t test_vnmsac_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vnmsac_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_tumu( @@ -1912,7 +1912,7 @@ vint32m8_t test_vnmsac_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vnmsac_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vnmsac_vv_i64m1_tumu( @@ -1921,7 +1921,7 @@ vint32m8_t test_vnmsac_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vnmsac_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_tumu( @@ -1930,7 +1930,7 @@ vint64m1_t test_vnmsac_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vnmsac_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_tumu( @@ -1939,7 +1939,7 @@ vint64m1_t test_vnmsac_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vnmsac_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_tumu( @@ -1948,7 +1948,7 @@ vint64m2_t test_vnmsac_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vnmsac_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_tumu( @@ -1957,7 +1957,7 @@ vint64m2_t test_vnmsac_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vnmsac_vv_i64m4_tumu(mask, vd, 
vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_tumu( @@ -1966,7 +1966,7 @@ vint64m4_t test_vnmsac_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vnmsac_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_tumu( @@ -1975,7 +1975,7 @@ vint64m4_t test_vnmsac_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vnmsac_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_tumu( @@ -1984,7 +1984,7 @@ vint64m8_t test_vnmsac_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vnmsac_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_tumu( @@ -1993,7 +1993,7 @@ vint64m8_t test_vnmsac_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vnmsac_vv_u8mf8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_tumu( @@ -2002,7 +2002,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_tumu(vbool64_t 
mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vnmsac_vx_u8mf8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_tumu( @@ -2011,7 +2011,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vnmsac_vv_u8mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_tumu( @@ -2020,7 +2020,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vnmsac_vx_u8mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_tumu( @@ -2029,7 +2029,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vnmsac_vv_u8mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_tumu( @@ -2038,7 +2038,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vnmsac_vx_u8mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_tumu( @@ -2047,7 +2047,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_tumu(vbool16_t 
mask, vuint8mf2_t vd, uint8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vnmsac_vv_u8m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_tumu( @@ -2056,7 +2056,7 @@ vuint8m1_t test_vnmsac_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vnmsac_vx_u8m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_tumu( @@ -2065,7 +2065,7 @@ vuint8m1_t test_vnmsac_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vnmsac_vv_u8m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_tumu( @@ -2074,7 +2074,7 @@ vuint8m2_t test_vnmsac_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vnmsac_vx_u8m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_tumu( @@ -2083,7 +2083,7 @@ vuint8m2_t test_vnmsac_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vnmsac_vv_u8m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vnmsac_vx_u8m4_tumu( @@ -2092,7 +2092,7 @@ vuint8m4_t test_vnmsac_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vnmsac_vx_u8m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_tumu( @@ -2101,7 +2101,7 @@ vuint8m4_t test_vnmsac_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vnmsac_vv_u8m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_tumu( @@ -2110,7 +2110,7 @@ vuint8m8_t test_vnmsac_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vnmsac_vx_u8m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_tumu( @@ -2119,7 +2119,7 @@ vuint8m8_t test_vnmsac_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vnmsac_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_tumu( @@ -2128,7 +2128,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vnmsac_vx_u16mf4_tumu(mask, 
vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_tumu( @@ -2137,7 +2137,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vnmsac_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_tumu( @@ -2146,7 +2146,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vnmsac_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_tumu( @@ -2155,7 +2155,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vnmsac_vv_u16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_tumu( @@ -2164,7 +2164,7 @@ vuint16m1_t test_vnmsac_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vnmsac_vx_u16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_tumu( @@ -2173,7 +2173,7 @@ vuint16m1_t test_vnmsac_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vnmsac_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vnmsac_vv_u16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_tumu( @@ -2182,7 +2182,7 @@ vuint16m2_t test_vnmsac_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vnmsac_vx_u16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_tumu( @@ -2191,7 +2191,7 @@ vuint16m2_t test_vnmsac_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vnmsac_vv_u16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_tumu( @@ -2200,7 +2200,7 @@ vuint16m4_t test_vnmsac_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vnmsac_vx_u16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_tumu( @@ -2209,7 +2209,7 @@ vuint16m4_t test_vnmsac_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vnmsac_vv_u16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_tumu( @@ -2218,7 +2218,7 @@ vuint16m8_t 
test_vnmsac_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vnmsac_vx_u16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_tumu( @@ -2227,7 +2227,7 @@ vuint16m8_t test_vnmsac_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vnmsac_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_tumu( @@ -2236,7 +2236,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vnmsac_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_tumu( @@ -2245,7 +2245,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vnmsac_vv_u32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_tumu( @@ -2254,7 +2254,7 @@ vuint32m1_t test_vnmsac_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vnmsac_vx_u32m1_tumu(mask, vd, rs1, vs2, vl); + return 
__riscv_vnmsac_vx_u32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_tumu( @@ -2263,7 +2263,7 @@ vuint32m1_t test_vnmsac_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vnmsac_vv_u32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_tumu( @@ -2272,7 +2272,7 @@ vuint32m2_t test_vnmsac_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vnmsac_vx_u32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_tumu( @@ -2281,7 +2281,7 @@ vuint32m2_t test_vnmsac_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vnmsac_vv_u32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_tumu( @@ -2290,7 +2290,7 @@ vuint32m4_t test_vnmsac_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vnmsac_vx_u32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_tumu( @@ -2299,7 +2299,7 @@ vuint32m4_t test_vnmsac_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_tumu(vbool4_t mask, 
vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vnmsac_vv_u32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_tumu( @@ -2308,7 +2308,7 @@ vuint32m8_t test_vnmsac_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vnmsac_vx_u32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_tumu( @@ -2317,7 +2317,7 @@ vuint32m8_t test_vnmsac_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vnmsac_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_tumu( @@ -2326,7 +2326,7 @@ vuint64m1_t test_vnmsac_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vnmsac_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_tumu( @@ -2335,7 +2335,7 @@ vuint64m1_t test_vnmsac_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vnmsac_vv_u64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_tumu( @@ -2344,7 +2344,7 @@ vuint64m2_t test_vnmsac_vv_u64m2_tumu(vbool32_t 
mask, vuint64m2_t vd, vuint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vnmsac_vx_u64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_tumu( @@ -2353,7 +2353,7 @@ vuint64m2_t test_vnmsac_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vnmsac_vv_u64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_tumu( @@ -2362,7 +2362,7 @@ vuint64m4_t test_vnmsac_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vnmsac_vx_u64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_tumu( @@ -2371,7 +2371,7 @@ vuint64m4_t test_vnmsac_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vnmsac_vv_u64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_tumu( @@ -2380,7 +2380,7 @@ vuint64m8_t test_vnmsac_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vnmsac_vx_u64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m8_tumu(mask, vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_mu( @@ -2389,7 +2389,7 @@ vuint64m8_t test_vnmsac_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vnmsac_vv_i8mf8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_mu( @@ -2398,7 +2398,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vnmsac_vx_i8mf8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_mu( @@ -2407,7 +2407,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vnmsac_vv_i8mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_mu( @@ -2416,7 +2416,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vnmsac_vx_i8mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_mu( @@ -2425,7 +2425,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vnmsac_vv_i8mf2_mu(mask, vd, vs1, vs2, vl); + return 
__riscv_vnmsac_vv_i8mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_mu( @@ -2434,7 +2434,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vnmsac_vx_i8mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_mu( @@ -2443,7 +2443,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vnmsac_vv_i8m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_mu( @@ -2452,7 +2452,7 @@ vint8m1_t test_vnmsac_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vnmsac_vx_i8m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_mu( @@ -2461,7 +2461,7 @@ vint8m1_t test_vnmsac_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vnmsac_vv_i8m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_mu( @@ -2470,7 +2470,7 @@ vint8m2_t test_vnmsac_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vnmsac_vx_i8m2_mu(mask, vd, 
rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_mu( @@ -2479,7 +2479,7 @@ vint8m2_t test_vnmsac_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vnmsac_vv_i8m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_mu( @@ -2488,7 +2488,7 @@ vint8m4_t test_vnmsac_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vnmsac_vx_i8m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_mu( @@ -2497,7 +2497,7 @@ vint8m4_t test_vnmsac_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vnmsac_vv_i8m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i8m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_mu( @@ -2506,7 +2506,7 @@ vint8m8_t test_vnmsac_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vnmsac_vx_i8m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i8m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_mu( @@ -2515,7 +2515,7 @@ vint8m8_t test_vnmsac_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return 
vnmsac_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_mu( @@ -2524,7 +2524,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vnmsac_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_mu( @@ -2533,7 +2533,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vnmsac_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_mu( @@ -2542,7 +2542,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vnmsac_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_mu( @@ -2551,7 +2551,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vnmsac_vv_i16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_mu( @@ -2560,7 +2560,7 @@ vint16m1_t test_vnmsac_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vnmsac_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vnmsac_vx_i16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_mu( @@ -2569,7 +2569,7 @@ vint16m1_t test_vnmsac_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vnmsac_vv_i16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_mu( @@ -2578,7 +2578,7 @@ vint16m2_t test_vnmsac_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vnmsac_vx_i16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_mu( @@ -2587,7 +2587,7 @@ vint16m2_t test_vnmsac_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vnmsac_vv_i16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_mu( @@ -2596,7 +2596,7 @@ vint16m4_t test_vnmsac_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vnmsac_vx_i16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_mu( @@ -2605,7 +2605,7 @@ vint16m4_t test_vnmsac_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t 
rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vnmsac_vv_i16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_mu( @@ -2614,7 +2614,7 @@ vint16m8_t test_vnmsac_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vnmsac_vx_i16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_mu( @@ -2623,7 +2623,7 @@ vint16m8_t test_vnmsac_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vnmsac_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_mu( @@ -2632,7 +2632,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vnmsac_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_mu( @@ -2641,7 +2641,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vnmsac_vv_i32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_mu( @@ -2650,7 +2650,7 @@ 
vint32m1_t test_vnmsac_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vnmsac_vx_i32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_mu( @@ -2659,7 +2659,7 @@ vint32m1_t test_vnmsac_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vnmsac_vv_i32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_mu( @@ -2668,7 +2668,7 @@ vint32m2_t test_vnmsac_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vnmsac_vx_i32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_mu( @@ -2677,7 +2677,7 @@ vint32m2_t test_vnmsac_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vnmsac_vv_i32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_mu( @@ -2686,7 +2686,7 @@ vint32m4_t test_vnmsac_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vnmsac_vx_i32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m4_mu(mask, vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_mu( @@ -2695,7 +2695,7 @@ vint32m4_t test_vnmsac_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vnmsac_vv_i32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_mu( @@ -2704,7 +2704,7 @@ vint32m8_t test_vnmsac_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vnmsac_vx_i32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_mu( @@ -2713,7 +2713,7 @@ vint32m8_t test_vnmsac_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vnmsac_vv_i64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_mu( @@ -2722,7 +2722,7 @@ vint64m1_t test_vnmsac_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vnmsac_vx_i64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_mu( @@ -2731,7 +2731,7 @@ vint64m1_t test_vnmsac_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vnmsac_vv_i64m2_mu(mask, vd, vs1, vs2, vl); + return 
__riscv_vnmsac_vv_i64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_mu( @@ -2740,7 +2740,7 @@ vint64m2_t test_vnmsac_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vnmsac_vx_i64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_mu( @@ -2749,7 +2749,7 @@ vint64m2_t test_vnmsac_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vnmsac_vv_i64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_mu( @@ -2758,7 +2758,7 @@ vint64m4_t test_vnmsac_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vnmsac_vx_i64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_mu( @@ -2767,7 +2767,7 @@ vint64m4_t test_vnmsac_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vnmsac_vv_i64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_i64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_mu( @@ -2776,7 +2776,7 @@ vint64m8_t test_vnmsac_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - 
return vnmsac_vx_i64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_i64m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_mu( @@ -2785,7 +2785,7 @@ vint64m8_t test_vnmsac_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vnmsac_vv_u8mf8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_mu( @@ -2794,7 +2794,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vnmsac_vx_u8mf8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_mu( @@ -2803,7 +2803,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vnmsac_vv_u8mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_mu( @@ -2812,7 +2812,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vnmsac_vx_u8mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_mu( @@ -2821,7 +2821,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vnmsac_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vnmsac_vv_u8mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_mu( @@ -2830,7 +2830,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vnmsac_vx_u8mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_mu( @@ -2839,7 +2839,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vnmsac_vv_u8m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_mu( @@ -2848,7 +2848,7 @@ vuint8m1_t test_vnmsac_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vnmsac_vx_u8m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_mu( @@ -2857,7 +2857,7 @@ vuint8m1_t test_vnmsac_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vnmsac_vv_u8m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_mu( @@ -2866,7 +2866,7 @@ vuint8m2_t test_vnmsac_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vnmsac_vx_u8m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_mu( @@ -2875,7 +2875,7 @@ vuint8m2_t test_vnmsac_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vnmsac_vv_u8m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_mu( @@ -2884,7 +2884,7 @@ vuint8m4_t test_vnmsac_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vnmsac_vx_u8m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_mu( @@ -2893,7 +2893,7 @@ vuint8m4_t test_vnmsac_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vnmsac_vv_u8m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u8m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_mu( @@ -2902,7 +2902,7 @@ vuint8m8_t test_vnmsac_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vnmsac_vx_u8m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u8m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_mu( @@ -2911,7 +2911,7 @@ vuint8m8_t test_vnmsac_vx_u8m8_mu(vbool1_t mask, 
vuint8m8_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vnmsac_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_mu( @@ -2920,7 +2920,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vnmsac_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_mu( @@ -2929,7 +2929,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vnmsac_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_mu( @@ -2938,7 +2938,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vnmsac_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_mu( @@ -2947,7 +2947,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vnmsac_vv_u16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m1_mu(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_mu( @@ -2956,7 +2956,7 @@ vuint16m1_t test_vnmsac_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vnmsac_vx_u16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_mu( @@ -2965,7 +2965,7 @@ vuint16m1_t test_vnmsac_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vnmsac_vv_u16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_mu( @@ -2974,7 +2974,7 @@ vuint16m2_t test_vnmsac_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vnmsac_vx_u16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_mu( @@ -2983,7 +2983,7 @@ vuint16m2_t test_vnmsac_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vnmsac_vv_u16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_mu( @@ -2992,7 +2992,7 @@ vuint16m4_t test_vnmsac_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vnmsac_vx_u16m4_mu(mask, vd, rs1, vs2, 
vl); + return __riscv_vnmsac_vx_u16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_mu( @@ -3001,7 +3001,7 @@ vuint16m4_t test_vnmsac_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vnmsac_vv_u16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_mu( @@ -3010,7 +3010,7 @@ vuint16m8_t test_vnmsac_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vnmsac_vx_u16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_mu( @@ -3019,7 +3019,7 @@ vuint16m8_t test_vnmsac_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vnmsac_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_mu( @@ -3028,7 +3028,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vnmsac_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_mu( @@ -3037,7 +3037,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, 
vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vnmsac_vv_u32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_mu( @@ -3046,7 +3046,7 @@ vuint32m1_t test_vnmsac_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vnmsac_vx_u32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_mu( @@ -3055,7 +3055,7 @@ vuint32m1_t test_vnmsac_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vnmsac_vv_u32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_mu( @@ -3064,7 +3064,7 @@ vuint32m2_t test_vnmsac_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vnmsac_vx_u32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_mu( @@ -3073,7 +3073,7 @@ vuint32m2_t test_vnmsac_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vnmsac_vv_u32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_mu( @@ -3082,7 +3082,7 @@ vuint32m4_t test_vnmsac_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t v // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vnmsac_vx_u32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_mu( @@ -3091,7 +3091,7 @@ vuint32m4_t test_vnmsac_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vnmsac_vv_u32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_mu( @@ -3100,7 +3100,7 @@ vuint32m8_t test_vnmsac_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vnmsac_vx_u32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_mu( @@ -3109,7 +3109,7 @@ vuint32m8_t test_vnmsac_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vnmsac_vv_u64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_mu( @@ -3118,7 +3118,7 @@ vuint64m1_t test_vnmsac_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vnmsac_vx_u64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_mu( @@ -3127,7 +3127,7 @@ vuint64m1_t 
test_vnmsac_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vnmsac_vv_u64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_mu( @@ -3136,7 +3136,7 @@ vuint64m2_t test_vnmsac_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vnmsac_vx_u64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_mu( @@ -3145,7 +3145,7 @@ vuint64m2_t test_vnmsac_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vnmsac_vv_u64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_mu( @@ -3154,7 +3154,7 @@ vuint64m4_t test_vnmsac_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vnmsac_vx_u64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_mu( @@ -3163,7 +3163,7 @@ vuint64m4_t test_vnmsac_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vnmsac_vv_u64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsac_vv_u64m8_mu(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_mu( @@ -3172,6 +3172,6 @@ vuint64m8_t test_vnmsac_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vnmsac_vx_u64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsac_vx_u64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsub.c index 9850be099f1e..c5511a2ea2b3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnmsub.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vnmsub_vv_i8mf8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_tu( @@ -22,7 +22,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vnmsub_vx_i8mf8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_tu( @@ -31,7 +31,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vnmsub_vv_i8mf4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_tu( @@ -40,7 +40,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vnmsub_vx_i8mf4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_tu( @@ -49,7 +49,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vnmsub_vv_i8mf2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_tu( @@ -58,7 +58,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vnmsub_vx_i8mf2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_tu( @@ -67,7 +67,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vnmsub_vv_i8m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_tu( @@ -76,7 +76,7 @@ vint8m1_t test_vnmsub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vnmsub_vx_i8m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_tu( @@ -85,7 +85,7 @@ vint8m1_t test_vnmsub_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return 
vnmsub_vv_i8m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_tu( @@ -94,7 +94,7 @@ vint8m2_t test_vnmsub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vnmsub_vx_i8m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_tu( @@ -103,7 +103,7 @@ vint8m2_t test_vnmsub_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vnmsub_vv_i8m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_tu( @@ -112,7 +112,7 @@ vint8m4_t test_vnmsub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vnmsub_vx_i8m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_tu( @@ -121,7 +121,7 @@ vint8m4_t test_vnmsub_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vnmsub_vv_i8m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_tu( @@ -130,7 +130,7 @@ vint8m8_t test_vnmsub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vnmsub_vx_i8m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vnmsub_vv_i16mf4_tu( @@ -139,7 +139,7 @@ vint8m8_t test_vnmsub_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vnmsub_vv_i16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_tu( @@ -148,7 +148,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vnmsub_vx_i16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_tu( @@ -157,7 +157,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vnmsub_vv_i16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_tu( @@ -166,7 +166,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vnmsub_vx_i16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_tu( @@ -175,7 +175,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vnmsub_vv_i16m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_tu( @@ -184,7 +184,7 @@ vint16m1_t 
test_vnmsub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vnmsub_vx_i16m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_tu( @@ -193,7 +193,7 @@ vint16m1_t test_vnmsub_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vnmsub_vv_i16m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_tu( @@ -202,7 +202,7 @@ vint16m2_t test_vnmsub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vnmsub_vx_i16m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_tu( @@ -211,7 +211,7 @@ vint16m2_t test_vnmsub_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vnmsub_vv_i16m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_tu( @@ -220,7 +220,7 @@ vint16m4_t test_vnmsub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vnmsub_vx_i16m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_tu( @@ -229,7 +229,7 @@ vint16m4_t test_vnmsub_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, s // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vnmsub_vv_i16m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_tu( @@ -238,7 +238,7 @@ vint16m8_t test_vnmsub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vnmsub_vx_i16m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tu( @@ -247,7 +247,7 @@ vint16m8_t test_vnmsub_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vnmsub_vv_i32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tu( @@ -256,7 +256,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vnmsub_vx_i32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_tu( @@ -265,7 +265,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vnmsub_vv_i32m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_tu( @@ -274,7 +274,7 @@ vint32m1_t test_vnmsub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, 
vint32m1_t vs2, size_t vl) { - return vnmsub_vx_i32m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_tu( @@ -283,7 +283,7 @@ vint32m1_t test_vnmsub_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vnmsub_vv_i32m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_tu( @@ -292,7 +292,7 @@ vint32m2_t test_vnmsub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vnmsub_vx_i32m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_tu( @@ -301,7 +301,7 @@ vint32m2_t test_vnmsub_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vnmsub_vv_i32m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_tu( @@ -310,7 +310,7 @@ vint32m4_t test_vnmsub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vnmsub_vx_i32m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_tu( @@ -319,7 +319,7 @@ vint32m4_t test_vnmsub_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vnmsub_vv_i32m8_tu(vd, vs1, vs2, vl); + return 
__riscv_vnmsub_vv_i32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_tu( @@ -328,7 +328,7 @@ vint32m8_t test_vnmsub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vnmsub_vx_i32m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_tu( @@ -337,7 +337,7 @@ vint32m8_t test_vnmsub_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vnmsub_vv_i64m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_tu( @@ -346,7 +346,7 @@ vint64m1_t test_vnmsub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vnmsub_vx_i64m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_tu( @@ -355,7 +355,7 @@ vint64m1_t test_vnmsub_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vnmsub_vv_i64m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_tu( @@ -364,7 +364,7 @@ vint64m2_t test_vnmsub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vnmsub_vx_i64m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vnmsub_vv_i64m4_tu( @@ -373,7 +373,7 @@ vint64m2_t test_vnmsub_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vnmsub_vv_i64m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_tu( @@ -382,7 +382,7 @@ vint64m4_t test_vnmsub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vnmsub_vx_i64m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_tu( @@ -391,7 +391,7 @@ vint64m4_t test_vnmsub_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vnmsub_vv_i64m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_tu( @@ -400,7 +400,7 @@ vint64m8_t test_vnmsub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vnmsub_vx_i64m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_tu( @@ -409,7 +409,7 @@ vint64m8_t test_vnmsub_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vnmsub_vv_u8mf8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_tu( @@ -418,7 +418,7 @@ vuint8mf8_t 
test_vnmsub_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vnmsub_vx_u8mf8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_tu( @@ -427,7 +427,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vnmsub_vv_u8mf4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_tu( @@ -436,7 +436,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vnmsub_vx_u8mf4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_tu( @@ -445,7 +445,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vnmsub_vv_u8mf2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_tu( @@ -454,7 +454,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vnmsub_vx_u8mf2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_tu( @@ -463,7 +463,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vnmsub_vv_u8m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_tu( @@ -472,7 +472,7 @@ vuint8m1_t test_vnmsub_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vnmsub_vx_u8m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_tu( @@ -481,7 +481,7 @@ vuint8m1_t test_vnmsub_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vnmsub_vv_u8m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_tu( @@ -490,7 +490,7 @@ vuint8m2_t test_vnmsub_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vnmsub_vx_u8m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_tu( @@ -499,7 +499,7 @@ vuint8m2_t test_vnmsub_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vnmsub_vv_u8m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_tu( @@ -508,7 +508,7 @@ vuint8m4_t test_vnmsub_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, 
size_t vl) { - return vnmsub_vx_u8m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_tu( @@ -517,7 +517,7 @@ vuint8m4_t test_vnmsub_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vnmsub_vv_u8m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_tu( @@ -526,7 +526,7 @@ vuint8m8_t test_vnmsub_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vnmsub_vx_u8m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_tu( @@ -535,7 +535,7 @@ vuint8m8_t test_vnmsub_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vnmsub_vv_u16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_tu( @@ -544,7 +544,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vnmsub_vx_u16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_tu( @@ -553,7 +553,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vnmsub_vv_u16mf2_tu(vd, vs1, vs2, vl); + 
return __riscv_vnmsub_vv_u16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_tu( @@ -562,7 +562,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vnmsub_vx_u16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_tu( @@ -571,7 +571,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vnmsub_vv_u16m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_tu( @@ -580,7 +580,7 @@ vuint16m1_t test_vnmsub_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vnmsub_vx_u16m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_tu( @@ -589,7 +589,7 @@ vuint16m1_t test_vnmsub_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vnmsub_vv_u16m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_tu( @@ -598,7 +598,7 @@ vuint16m2_t test_vnmsub_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vnmsub_vx_u16m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m2_tu(vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_tu( @@ -607,7 +607,7 @@ vuint16m2_t test_vnmsub_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vnmsub_vv_u16m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_tu( @@ -616,7 +616,7 @@ vuint16m4_t test_vnmsub_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vnmsub_vx_u16m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_tu( @@ -625,7 +625,7 @@ vuint16m4_t test_vnmsub_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vnmsub_vv_u16m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_tu( @@ -634,7 +634,7 @@ vuint16m8_t test_vnmsub_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vnmsub_vx_u16m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tu( @@ -643,7 +643,7 @@ vuint16m8_t test_vnmsub_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vnmsub_vv_u32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tu( @@ -652,7 +652,7 
@@ vuint32mf2_t test_vnmsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vnmsub_vx_u32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_tu( @@ -661,7 +661,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vnmsub_vv_u32m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_tu( @@ -670,7 +670,7 @@ vuint32m1_t test_vnmsub_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vnmsub_vx_u32m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_tu( @@ -679,7 +679,7 @@ vuint32m1_t test_vnmsub_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vnmsub_vv_u32m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_tu( @@ -688,7 +688,7 @@ vuint32m2_t test_vnmsub_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vnmsub_vx_u32m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_tu( @@ -697,7 +697,7 @@ vuint32m2_t test_vnmsub_vx_u32m2_tu(vuint32m2_t vd, uint32_t 
rs1, vuint32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vnmsub_vv_u32m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_tu( @@ -706,7 +706,7 @@ vuint32m4_t test_vnmsub_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vnmsub_vx_u32m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_tu( @@ -715,7 +715,7 @@ vuint32m4_t test_vnmsub_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vnmsub_vv_u32m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_tu( @@ -724,7 +724,7 @@ vuint32m8_t test_vnmsub_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vnmsub_vx_u32m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_tu( @@ -733,7 +733,7 @@ vuint32m8_t test_vnmsub_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vnmsub_vv_u64m1_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_tu( @@ -742,7 +742,7 @@ vuint64m1_t test_vnmsub_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vnmsub_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vnmsub_vx_u64m1_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_tu( @@ -751,7 +751,7 @@ vuint64m1_t test_vnmsub_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vnmsub_vv_u64m2_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_tu( @@ -760,7 +760,7 @@ vuint64m2_t test_vnmsub_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vnmsub_vx_u64m2_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_tu( @@ -769,7 +769,7 @@ vuint64m2_t test_vnmsub_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vnmsub_vv_u64m4_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_tu( @@ -778,7 +778,7 @@ vuint64m4_t test_vnmsub_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vnmsub_vx_u64m4_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_tu( @@ -787,7 +787,7 @@ vuint64m4_t test_vnmsub_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, 
size_t vl) { - return vnmsub_vv_u64m8_tu(vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_tu( @@ -796,7 +796,7 @@ vuint64m8_t test_vnmsub_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vnmsub_vx_u64m8_tu(vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_tum( @@ -805,7 +805,7 @@ vuint64m8_t test_vnmsub_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vnmsub_vv_i8mf8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_tum( @@ -814,7 +814,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vnmsub_vx_i8mf8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_tum( @@ -823,7 +823,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vnmsub_vv_i8mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_tum( @@ -832,7 +832,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, 
int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vnmsub_vx_i8mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_tum( @@ -841,7 +841,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vnmsub_vv_i8mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_tum( @@ -850,7 +850,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vnmsub_vx_i8mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_tum( @@ -859,7 +859,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vnmsub_vv_i8m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_tum( @@ -868,7 +868,7 @@ vint8m1_t test_vnmsub_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vnmsub_vx_i8m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_tum( @@ -877,7 +877,7 @@ vint8m1_t test_vnmsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vnmsub_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vnmsub_vv_i8m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_tum( @@ -886,7 +886,7 @@ vint8m2_t test_vnmsub_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vnmsub_vx_i8m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_tum( @@ -895,7 +895,7 @@ vint8m2_t test_vnmsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vnmsub_vv_i8m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_tum( @@ -904,7 +904,7 @@ vint8m4_t test_vnmsub_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vnmsub_vx_i8m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_tum( @@ -913,7 +913,7 @@ vint8m4_t test_vnmsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vnmsub_vv_i8m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_tum( @@ -922,7 +922,7 @@ vint8m8_t test_vnmsub_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vi // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vnmsub_vx_i8m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_tum( @@ -931,7 +931,7 @@ vint8m8_t test_vnmsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vnmsub_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_tum( @@ -940,7 +940,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vnmsub_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_tum( @@ -949,7 +949,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vnmsub_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_tum( @@ -958,7 +958,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vnmsub_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_tum( @@ -967,7 +967,7 @@ vint16mf2_t 
test_vnmsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vnmsub_vv_i16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_tum( @@ -976,7 +976,7 @@ vint16m1_t test_vnmsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vnmsub_vx_i16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_tum( @@ -985,7 +985,7 @@ vint16m1_t test_vnmsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vnmsub_vv_i16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_tum( @@ -994,7 +994,7 @@ vint16m2_t test_vnmsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vnmsub_vx_i16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_tum( @@ -1003,7 +1003,7 @@ vint16m2_t test_vnmsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vnmsub_vv_i16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m4_tum(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_tum( @@ -1012,7 +1012,7 @@ vint16m4_t test_vnmsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vnmsub_vx_i16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_tum( @@ -1021,7 +1021,7 @@ vint16m4_t test_vnmsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vnmsub_vv_i16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_tum( @@ -1030,7 +1030,7 @@ vint16m8_t test_vnmsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vnmsub_vx_i16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tum( @@ -1039,7 +1039,7 @@ vint16m8_t test_vnmsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vnmsub_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tum( @@ -1048,7 +1048,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return 
vnmsub_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_tum( @@ -1057,7 +1057,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vnmsub_vv_i32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_tum( @@ -1066,7 +1066,7 @@ vint32m1_t test_vnmsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vnmsub_vx_i32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_tum( @@ -1075,7 +1075,7 @@ vint32m1_t test_vnmsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vnmsub_vv_i32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_tum( @@ -1084,7 +1084,7 @@ vint32m2_t test_vnmsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vnmsub_vx_i32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_tum( @@ -1093,7 +1093,7 @@ vint32m2_t test_vnmsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vnmsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vnmsub_vv_i32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_tum( @@ -1102,7 +1102,7 @@ vint32m4_t test_vnmsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vnmsub_vx_i32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_tum( @@ -1111,7 +1111,7 @@ vint32m4_t test_vnmsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vnmsub_vv_i32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_tum( @@ -1120,7 +1120,7 @@ vint32m8_t test_vnmsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vnmsub_vx_i32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_tum( @@ -1129,7 +1129,7 @@ vint32m8_t test_vnmsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vnmsub_vv_i64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_tum( @@ -1138,7 +1138,7 @@ vint64m1_t test_vnmsub_vv_i64m1_tum(vbool64_t mask, 
vint64m1_t vd, vint64m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vnmsub_vx_i64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_tum( @@ -1147,7 +1147,7 @@ vint64m1_t test_vnmsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vnmsub_vv_i64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_tum( @@ -1156,7 +1156,7 @@ vint64m2_t test_vnmsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vnmsub_vx_i64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_tum( @@ -1165,7 +1165,7 @@ vint64m2_t test_vnmsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vnmsub_vv_i64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_tum( @@ -1174,7 +1174,7 @@ vint64m4_t test_vnmsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vnmsub_vx_i64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_tum( 
@@ -1183,7 +1183,7 @@ vint64m4_t test_vnmsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vnmsub_vv_i64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_tum( @@ -1192,7 +1192,7 @@ vint64m8_t test_vnmsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vnmsub_vx_i64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_tum( @@ -1201,7 +1201,7 @@ vint64m8_t test_vnmsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vnmsub_vv_u8mf8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_tum( @@ -1210,7 +1210,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vnmsub_vx_u8mf8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_tum( @@ -1219,7 +1219,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vnmsub_vv_u8mf4_tum(mask, vd, vs1, vs2, vl); + return 
__riscv_vnmsub_vv_u8mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_tum( @@ -1228,7 +1228,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vnmsub_vx_u8mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_tum( @@ -1237,7 +1237,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vnmsub_vv_u8mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_tum( @@ -1246,7 +1246,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vnmsub_vx_u8mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_tum( @@ -1255,7 +1255,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vnmsub_vv_u8m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_tum( @@ -1264,7 +1264,7 @@ vuint8m1_t test_vnmsub_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t 
vs2, size_t vl) { - return vnmsub_vx_u8m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_tum( @@ -1273,7 +1273,7 @@ vuint8m1_t test_vnmsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vnmsub_vv_u8m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_tum( @@ -1282,7 +1282,7 @@ vuint8m2_t test_vnmsub_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vnmsub_vx_u8m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_tum( @@ -1291,7 +1291,7 @@ vuint8m2_t test_vnmsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vnmsub_vv_u8m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_tum( @@ -1300,7 +1300,7 @@ vuint8m4_t test_vnmsub_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vnmsub_vx_u8m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_tum( @@ -1309,7 +1309,7 @@ vuint8m4_t test_vnmsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vnmsub_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vnmsub_vv_u8m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_tum( @@ -1318,7 +1318,7 @@ vuint8m8_t test_vnmsub_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vnmsub_vx_u8m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_tum( @@ -1327,7 +1327,7 @@ vuint8m8_t test_vnmsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vnmsub_vv_u16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_tum( @@ -1336,7 +1336,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vnmsub_vx_u16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_tum( @@ -1345,7 +1345,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vnmsub_vv_u16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_tum( @@ -1354,7 +1354,7 @@ vuint16mf2_t 
test_vnmsub_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vnmsub_vx_u16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_tum( @@ -1363,7 +1363,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vnmsub_vv_u16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_tum( @@ -1372,7 +1372,7 @@ vuint16m1_t test_vnmsub_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vnmsub_vx_u16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_tum( @@ -1381,7 +1381,7 @@ vuint16m1_t test_vnmsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vnmsub_vv_u16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_tum( @@ -1390,7 +1390,7 @@ vuint16m2_t test_vnmsub_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vnmsub_vx_u16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m2_tum(mask, vd, 
rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_tum( @@ -1399,7 +1399,7 @@ vuint16m2_t test_vnmsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vnmsub_vv_u16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_tum( @@ -1408,7 +1408,7 @@ vuint16m4_t test_vnmsub_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vnmsub_vx_u16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_tum( @@ -1417,7 +1417,7 @@ vuint16m4_t test_vnmsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vnmsub_vv_u16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_tum( @@ -1426,7 +1426,7 @@ vuint16m8_t test_vnmsub_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vnmsub_vx_u16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tum( @@ -1435,7 +1435,7 @@ vuint16m8_t test_vnmsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - 
return vnmsub_vv_u32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tum( @@ -1444,7 +1444,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vnmsub_vx_u32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_tum( @@ -1453,7 +1453,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vnmsub_vv_u32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_tum( @@ -1462,7 +1462,7 @@ vuint32m1_t test_vnmsub_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vnmsub_vx_u32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_tum( @@ -1471,7 +1471,7 @@ vuint32m1_t test_vnmsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vnmsub_vv_u32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_tum( @@ -1480,7 +1480,7 @@ vuint32m2_t test_vnmsub_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m2_t test_vnmsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vnmsub_vx_u32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_tum( @@ -1489,7 +1489,7 @@ vuint32m2_t test_vnmsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vnmsub_vv_u32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_tum( @@ -1498,7 +1498,7 @@ vuint32m4_t test_vnmsub_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vnmsub_vx_u32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_tum( @@ -1507,7 +1507,7 @@ vuint32m4_t test_vnmsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vnmsub_vv_u32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_tum( @@ -1516,7 +1516,7 @@ vuint32m8_t test_vnmsub_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vnmsub_vx_u32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_tum( @@ -1525,7 +1525,7 @@ vuint32m8_t 
test_vnmsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vnmsub_vv_u64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_tum( @@ -1534,7 +1534,7 @@ vuint64m1_t test_vnmsub_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vnmsub_vx_u64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_tum( @@ -1543,7 +1543,7 @@ vuint64m1_t test_vnmsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vnmsub_vv_u64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_tum( @@ -1552,7 +1552,7 @@ vuint64m2_t test_vnmsub_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vnmsub_vx_u64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_tum( @@ -1561,7 +1561,7 @@ vuint64m2_t test_vnmsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vnmsub_vv_u64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m4_tum(mask, vd, 
vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_tum( @@ -1570,7 +1570,7 @@ vuint64m4_t test_vnmsub_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vnmsub_vx_u64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_tum( @@ -1579,7 +1579,7 @@ vuint64m4_t test_vnmsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vnmsub_vv_u64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_tum( @@ -1588,7 +1588,7 @@ vuint64m8_t test_vnmsub_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vnmsub_vx_u64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_tumu( @@ -1597,7 +1597,7 @@ vuint64m8_t test_vnmsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vnmsub_vv_i8mf8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_tumu( @@ -1606,7 +1606,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return 
vnmsub_vx_i8mf8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_tumu( @@ -1615,7 +1615,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vnmsub_vv_i8mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_tumu( @@ -1624,7 +1624,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vnmsub_vx_i8mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_tumu( @@ -1633,7 +1633,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vnmsub_vv_i8mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_tumu( @@ -1642,7 +1642,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vnmsub_vx_i8mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_tumu( @@ -1651,7 +1651,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vnmsub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vnmsub_vv_i8m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_tumu( @@ -1660,7 +1660,7 @@ vint8m1_t test_vnmsub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vnmsub_vx_i8m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_tumu( @@ -1669,7 +1669,7 @@ vint8m1_t test_vnmsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vnmsub_vv_i8m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_tumu( @@ -1678,7 +1678,7 @@ vint8m2_t test_vnmsub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vnmsub_vx_i8m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_tumu( @@ -1687,7 +1687,7 @@ vint8m2_t test_vnmsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vnmsub_vv_i8m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_tumu( @@ -1696,7 +1696,7 @@ vint8m4_t test_vnmsub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, 
vint8m4_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vnmsub_vx_i8m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_tumu( @@ -1705,7 +1705,7 @@ vint8m4_t test_vnmsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vnmsub_vv_i8m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_tumu( @@ -1714,7 +1714,7 @@ vint8m8_t test_vnmsub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return vnmsub_vx_i8m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_tumu( @@ -1723,7 +1723,7 @@ vint8m8_t test_vnmsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vnmsub_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_tumu( @@ -1732,7 +1732,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vnmsub_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_tumu( 
@@ -1741,7 +1741,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vnmsub_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_tumu( @@ -1750,7 +1750,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vnmsub_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_tumu( @@ -1759,7 +1759,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vnmsub_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_tumu( @@ -1768,7 +1768,7 @@ vint16m1_t test_vnmsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vnmsub_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_tumu( @@ -1777,7 +1777,7 @@ vint16m1_t test_vnmsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vnmsub_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); + 
return __riscv_vnmsub_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_tumu( @@ -1786,7 +1786,7 @@ vint16m2_t test_vnmsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vnmsub_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_tumu( @@ -1795,7 +1795,7 @@ vint16m2_t test_vnmsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vnmsub_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_tumu( @@ -1804,7 +1804,7 @@ vint16m4_t test_vnmsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vnmsub_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_tumu( @@ -1813,7 +1813,7 @@ vint16m4_t test_vnmsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vnmsub_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_tumu( @@ -1822,7 +1822,7 @@ vint16m8_t test_vnmsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t 
rs1, vint16m8_t vs2, size_t vl) { - return vnmsub_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_tumu( @@ -1831,7 +1831,7 @@ vint16m8_t test_vnmsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vnmsub_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_tumu( @@ -1840,7 +1840,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vnmsub_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_tumu( @@ -1849,7 +1849,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vnmsub_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_tumu( @@ -1858,7 +1858,7 @@ vint32m1_t test_vnmsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vnmsub_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_tumu( @@ -1867,7 +1867,7 @@ vint32m1_t test_vnmsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vnmsub_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_tumu( @@ -1876,7 +1876,7 @@ vint32m2_t test_vnmsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vnmsub_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_tumu( @@ -1885,7 +1885,7 @@ vint32m2_t test_vnmsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vnmsub_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_tumu( @@ -1894,7 +1894,7 @@ vint32m4_t test_vnmsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vnmsub_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_tumu( @@ -1903,7 +1903,7 @@ vint32m4_t test_vnmsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vnmsub_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_tumu( @@ 
-1912,7 +1912,7 @@ vint32m8_t test_vnmsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vnmsub_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_tumu( @@ -1921,7 +1921,7 @@ vint32m8_t test_vnmsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vnmsub_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_tumu( @@ -1930,7 +1930,7 @@ vint64m1_t test_vnmsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vnmsub_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_tumu( @@ -1939,7 +1939,7 @@ vint64m1_t test_vnmsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vnmsub_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_tumu( @@ -1948,7 +1948,7 @@ vint64m2_t test_vnmsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vnmsub_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); + return 
__riscv_vnmsub_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_tumu( @@ -1957,7 +1957,7 @@ vint64m2_t test_vnmsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vnmsub_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_tumu( @@ -1966,7 +1966,7 @@ vint64m4_t test_vnmsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vnmsub_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_tumu( @@ -1975,7 +1975,7 @@ vint64m4_t test_vnmsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return vnmsub_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_tumu( @@ -1984,7 +1984,7 @@ vint64m8_t test_vnmsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vnmsub_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_tumu( @@ -1993,7 +1993,7 @@ vint64m8_t test_vnmsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, 
vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vnmsub_vv_u8mf8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_tumu( @@ -2002,7 +2002,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vnmsub_vx_u8mf8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_tumu( @@ -2011,7 +2011,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vnmsub_vv_u8mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_tumu( @@ -2020,7 +2020,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vnmsub_vx_u8mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_tumu( @@ -2029,7 +2029,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vnmsub_vv_u8mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_tumu( @@ -2038,7 +2038,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, 
vuint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vnmsub_vx_u8mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_tumu( @@ -2047,7 +2047,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vnmsub_vv_u8m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_tumu( @@ -2056,7 +2056,7 @@ vuint8m1_t test_vnmsub_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vnmsub_vx_u8m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_tumu( @@ -2065,7 +2065,7 @@ vuint8m1_t test_vnmsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vnmsub_vv_u8m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_tumu( @@ -2074,7 +2074,7 @@ vuint8m2_t test_vnmsub_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vnmsub_vx_u8m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_tumu( @@ -2083,7 
+2083,7 @@ vuint8m2_t test_vnmsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vnmsub_vv_u8m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_tumu( @@ -2092,7 +2092,7 @@ vuint8m4_t test_vnmsub_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vnmsub_vx_u8m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_tumu( @@ -2101,7 +2101,7 @@ vuint8m4_t test_vnmsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vnmsub_vv_u8m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_tumu( @@ -2110,7 +2110,7 @@ vuint8m8_t test_vnmsub_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vnmsub_vx_u8m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_tumu( @@ -2119,7 +2119,7 @@ vuint8m8_t test_vnmsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vnmsub_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl); + return 
__riscv_vnmsub_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_tumu( @@ -2128,7 +2128,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vnmsub_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_tumu( @@ -2137,7 +2137,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vnmsub_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_tumu( @@ -2146,7 +2146,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vnmsub_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_tumu( @@ -2155,7 +2155,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vnmsub_vv_u16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_tumu( @@ -2164,7 +2164,7 @@ vuint16m1_t test_vnmsub_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vnmsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vnmsub_vx_u16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_tumu( @@ -2173,7 +2173,7 @@ vuint16m1_t test_vnmsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vnmsub_vv_u16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_tumu( @@ -2182,7 +2182,7 @@ vuint16m2_t test_vnmsub_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vnmsub_vx_u16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_tumu( @@ -2191,7 +2191,7 @@ vuint16m2_t test_vnmsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vnmsub_vv_u16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_tumu( @@ -2200,7 +2200,7 @@ vuint16m4_t test_vnmsub_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vnmsub_vx_u16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_tumu( @@ -2209,7 +2209,7 @@ vuint16m4_t 
test_vnmsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vnmsub_vv_u16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_tumu( @@ -2218,7 +2218,7 @@ vuint16m8_t test_vnmsub_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vnmsub_vx_u16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_tumu( @@ -2227,7 +2227,7 @@ vuint16m8_t test_vnmsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vnmsub_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_tumu( @@ -2236,7 +2236,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vnmsub_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_tumu( @@ -2245,7 +2245,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vnmsub_vv_u32m1_tumu(mask, vd, vs1, vs2, vl); + return 
__riscv_vnmsub_vv_u32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_tumu( @@ -2254,7 +2254,7 @@ vuint32m1_t test_vnmsub_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vnmsub_vx_u32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_tumu( @@ -2263,7 +2263,7 @@ vuint32m1_t test_vnmsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vnmsub_vv_u32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_tumu( @@ -2272,7 +2272,7 @@ vuint32m2_t test_vnmsub_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vnmsub_vx_u32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_tumu( @@ -2281,7 +2281,7 @@ vuint32m2_t test_vnmsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vnmsub_vv_u32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_tumu( @@ -2290,7 +2290,7 @@ vuint32m4_t test_vnmsub_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_tumu(vbool8_t mask, 
vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vnmsub_vx_u32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_tumu( @@ -2299,7 +2299,7 @@ vuint32m4_t test_vnmsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vnmsub_vv_u32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_tumu( @@ -2308,7 +2308,7 @@ vuint32m8_t test_vnmsub_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vnmsub_vx_u32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_tumu( @@ -2317,7 +2317,7 @@ vuint32m8_t test_vnmsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vnmsub_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_tumu( @@ -2326,7 +2326,7 @@ vuint64m1_t test_vnmsub_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vnmsub_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_tumu( @@ -2335,7 +2335,7 @@ vuint64m1_t test_vnmsub_vx_u64m1_tumu(vbool64_t mask, 
vuint64m1_t vd, uint64_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vnmsub_vv_u64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_tumu( @@ -2344,7 +2344,7 @@ vuint64m2_t test_vnmsub_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vnmsub_vx_u64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_tumu( @@ -2353,7 +2353,7 @@ vuint64m2_t test_vnmsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vnmsub_vv_u64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_tumu( @@ -2362,7 +2362,7 @@ vuint64m4_t test_vnmsub_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vnmsub_vx_u64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_tumu( @@ -2371,7 +2371,7 @@ vuint64m4_t test_vnmsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vnmsub_vv_u64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m8_tumu(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_tumu( @@ -2380,7 +2380,7 @@ vuint64m8_t test_vnmsub_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vnmsub_vx_u64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_mu( @@ -2389,7 +2389,7 @@ vuint64m8_t test_vnmsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vnmsub_vv_i8mf8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_mu( @@ -2398,7 +2398,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vnmsub_vx_i8mf8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_mu( @@ -2407,7 +2407,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vnmsub_vv_i8mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_mu( @@ -2416,7 +2416,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vnmsub_vx_i8mf4_mu(mask, vd, rs1, vs2, vl); 
+ return __riscv_vnmsub_vx_i8mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_mu( @@ -2425,7 +2425,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vnmsub_vv_i8mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_mu( @@ -2434,7 +2434,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vnmsub_vx_i8mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_mu( @@ -2443,7 +2443,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vnmsub_vv_i8m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_mu( @@ -2452,7 +2452,7 @@ vint8m1_t test_vnmsub_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vnmsub_vx_i8m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_mu( @@ -2461,7 +2461,7 @@ vint8m1_t test_vnmsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return 
vnmsub_vv_i8m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_mu( @@ -2470,7 +2470,7 @@ vint8m2_t test_vnmsub_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vnmsub_vx_i8m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_mu( @@ -2479,7 +2479,7 @@ vint8m2_t test_vnmsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vnmsub_vv_i8m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_mu( @@ -2488,7 +2488,7 @@ vint8m4_t test_vnmsub_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vnmsub_vx_i8m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_mu( @@ -2497,7 +2497,7 @@ vint8m4_t test_vnmsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return vnmsub_vv_i8m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i8m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_mu( @@ -2506,7 +2506,7 @@ vint8m8_t test_vnmsub_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) 
{ - return vnmsub_vx_i8m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i8m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_mu( @@ -2515,7 +2515,7 @@ vint8m8_t test_vnmsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vnmsub_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_mu( @@ -2524,7 +2524,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vnmsub_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_mu( @@ -2533,7 +2533,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vnmsub_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_mu( @@ -2542,7 +2542,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vnmsub_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_mu( @@ -2551,7 +2551,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vnmsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vnmsub_vv_i16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_mu( @@ -2560,7 +2560,7 @@ vint16m1_t test_vnmsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vnmsub_vx_i16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_mu( @@ -2569,7 +2569,7 @@ vint16m1_t test_vnmsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vnmsub_vv_i16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_mu( @@ -2578,7 +2578,7 @@ vint16m2_t test_vnmsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vnmsub_vx_i16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_mu( @@ -2587,7 +2587,7 @@ vint16m2_t test_vnmsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vnmsub_vv_i16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_mu( @@ -2596,7 +2596,7 @@ vint16m4_t test_vnmsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, 
vint16m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vnmsub_vx_i16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_mu( @@ -2605,7 +2605,7 @@ vint16m4_t test_vnmsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return vnmsub_vv_i16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_mu( @@ -2614,7 +2614,7 @@ vint16m8_t test_vnmsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return vnmsub_vx_i16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_mu( @@ -2623,7 +2623,7 @@ vint16m8_t test_vnmsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vnmsub_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_mu( @@ -2632,7 +2632,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vnmsub_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_mu( @@ -2641,7 +2641,7 
@@ vint32mf2_t test_vnmsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vnmsub_vv_i32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_mu( @@ -2650,7 +2650,7 @@ vint32m1_t test_vnmsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vnmsub_vx_i32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_mu( @@ -2659,7 +2659,7 @@ vint32m1_t test_vnmsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vnmsub_vv_i32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_mu( @@ -2668,7 +2668,7 @@ vint32m2_t test_vnmsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vnmsub_vx_i32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_mu( @@ -2677,7 +2677,7 @@ vint32m2_t test_vnmsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vnmsub_vv_i32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m4_mu(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_mu( @@ -2686,7 +2686,7 @@ vint32m4_t test_vnmsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vnmsub_vx_i32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_mu( @@ -2695,7 +2695,7 @@ vint32m4_t test_vnmsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return vnmsub_vv_i32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_mu( @@ -2704,7 +2704,7 @@ vint32m8_t test_vnmsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return vnmsub_vx_i32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_mu( @@ -2713,7 +2713,7 @@ vint32m8_t test_vnmsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return vnmsub_vv_i64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_mu( @@ -2722,7 +2722,7 @@ vint64m1_t test_vnmsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return vnmsub_vx_i64m1_mu(mask, vd, rs1, vs2, vl); + return 
__riscv_vnmsub_vx_i64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_mu( @@ -2731,7 +2731,7 @@ vint64m1_t test_vnmsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return vnmsub_vv_i64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_mu( @@ -2740,7 +2740,7 @@ vint64m2_t test_vnmsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return vnmsub_vx_i64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_mu( @@ -2749,7 +2749,7 @@ vint64m2_t test_vnmsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return vnmsub_vv_i64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_mu( @@ -2758,7 +2758,7 @@ vint64m4_t test_vnmsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return vnmsub_vx_i64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_mu( @@ -2767,7 +2767,7 @@ vint64m4_t test_vnmsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - 
return vnmsub_vv_i64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_i64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_mu( @@ -2776,7 +2776,7 @@ vint64m8_t test_vnmsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return vnmsub_vx_i64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_i64m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_mu( @@ -2785,7 +2785,7 @@ vint64m8_t test_vnmsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vnmsub_vv_u8mf8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_mu( @@ -2794,7 +2794,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vnmsub_vx_u8mf8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_mu( @@ -2803,7 +2803,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vnmsub_vv_u8mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_mu( @@ -2812,7 +2812,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_mu(vbool32_t 
mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vnmsub_vx_u8mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_mu( @@ -2821,7 +2821,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vnmsub_vv_u8mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_mu( @@ -2830,7 +2830,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vnmsub_vx_u8mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_mu( @@ -2839,7 +2839,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vnmsub_vv_u8m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_mu( @@ -2848,7 +2848,7 @@ vuint8m1_t test_vnmsub_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vnmsub_vx_u8m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_mu( @@ -2857,7 +2857,7 @@ vuint8m1_t test_vnmsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vnmsub_vv_u8m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_mu( @@ -2866,7 +2866,7 @@ vuint8m2_t test_vnmsub_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vnmsub_vx_u8m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_mu( @@ -2875,7 +2875,7 @@ vuint8m2_t test_vnmsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vnmsub_vv_u8m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_mu( @@ -2884,7 +2884,7 @@ vuint8m4_t test_vnmsub_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vnmsub_vx_u8m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_mu( @@ -2893,7 +2893,7 @@ vuint8m4_t test_vnmsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return vnmsub_vv_u8m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u8m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_mu( @@ -2902,7 +2902,7 @@ vuint8m8_t test_vnmsub_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, 
vuint8m8_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return vnmsub_vx_u8m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u8m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_mu( @@ -2911,7 +2911,7 @@ vuint8m8_t test_vnmsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vnmsub_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_mu( @@ -2920,7 +2920,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vnmsub_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_mu( @@ -2929,7 +2929,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vnmsub_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_mu( @@ -2938,7 +2938,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vnmsub_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vnmsub_vv_u16m1_mu( @@ -2947,7 +2947,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vnmsub_vv_u16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_mu( @@ -2956,7 +2956,7 @@ vuint16m1_t test_vnmsub_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vnmsub_vx_u16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_mu( @@ -2965,7 +2965,7 @@ vuint16m1_t test_vnmsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vnmsub_vv_u16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_mu( @@ -2974,7 +2974,7 @@ vuint16m2_t test_vnmsub_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vnmsub_vx_u16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_mu( @@ -2983,7 +2983,7 @@ vuint16m2_t test_vnmsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vnmsub_vv_u16m4_mu(mask, vd, vs1, vs2, vl); + return 
__riscv_vnmsub_vv_u16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_mu( @@ -2992,7 +2992,7 @@ vuint16m4_t test_vnmsub_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vnmsub_vx_u16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_mu( @@ -3001,7 +3001,7 @@ vuint16m4_t test_vnmsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return vnmsub_vv_u16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_mu( @@ -3010,7 +3010,7 @@ vuint16m8_t test_vnmsub_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return vnmsub_vx_u16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_mu( @@ -3019,7 +3019,7 @@ vuint16m8_t test_vnmsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vnmsub_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_mu( @@ -3028,7 +3028,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, 
vuint32mf2_t vs2, size_t vl) { - return vnmsub_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_mu( @@ -3037,7 +3037,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vnmsub_vv_u32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_mu( @@ -3046,7 +3046,7 @@ vuint32m1_t test_vnmsub_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vnmsub_vx_u32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_mu( @@ -3055,7 +3055,7 @@ vuint32m1_t test_vnmsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vnmsub_vv_u32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_mu( @@ -3064,7 +3064,7 @@ vuint32m2_t test_vnmsub_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vnmsub_vx_u32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_mu( @@ -3073,7 +3073,7 @@ vuint32m2_t test_vnmsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m4_t test_vnmsub_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vnmsub_vv_u32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_mu( @@ -3082,7 +3082,7 @@ vuint32m4_t test_vnmsub_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vnmsub_vx_u32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_mu( @@ -3091,7 +3091,7 @@ vuint32m4_t test_vnmsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return vnmsub_vv_u32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_mu( @@ -3100,7 +3100,7 @@ vuint32m8_t test_vnmsub_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return vnmsub_vx_u32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_mu( @@ -3109,7 +3109,7 @@ vuint32m8_t test_vnmsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return vnmsub_vv_u64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_mu( @@ -3118,7 +3118,7 @@ vuint64m1_t 
test_vnmsub_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return vnmsub_vx_u64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_mu( @@ -3127,7 +3127,7 @@ vuint64m1_t test_vnmsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return vnmsub_vv_u64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_mu( @@ -3136,7 +3136,7 @@ vuint64m2_t test_vnmsub_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return vnmsub_vx_u64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_mu( @@ -3145,7 +3145,7 @@ vuint64m2_t test_vnmsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return vnmsub_vv_u64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_mu( @@ -3154,7 +3154,7 @@ vuint64m4_t test_vnmsub_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return vnmsub_vx_u64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m4_mu(mask, vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_mu( @@ -3163,7 +3163,7 @@ vuint64m4_t test_vnmsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return vnmsub_vv_u64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vnmsub_vv_u64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_mu( @@ -3172,6 +3172,6 @@ vuint64m8_t test_vnmsub_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return vnmsub_vx_u64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vnmsub_vx_u64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnot.c index 561c93102eb0..0de1e11c83a0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnot.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnot.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnot_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vnot_v_i8mf8_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i8mf8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf4_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vnot_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnot_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vnot_v_i8mf4_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i8mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf2_tu( @@ -30,7 +30,7 @@ vint8mf4_t test_vnot_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8mf2_t test_vnot_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vnot_v_i8mf2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i8mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m1_tu( @@ -39,7 +39,7 @@ vint8mf2_t test_vnot_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnot_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vnot_v_i8m1_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i8m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m2_tu( @@ -48,7 +48,7 @@ vint8m1_t test_vnot_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnot_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vnot_v_i8m2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i8m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m4_tu( @@ -57,7 +57,7 @@ vint8m2_t test_vnot_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnot_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vnot_v_i8m4_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i8m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m8_tu( @@ -66,7 +66,7 @@ vint8m4_t test_vnot_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return vnot_v_i8m8_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i8m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf4_tu( @@ -75,7 +75,7 @@ vint8m8_t test_vnot_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vnot_v_i16mf4_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i16mf4_tu(maskedoff, op1, vl); } // 
CHECK-RV64-LABEL: @test_vnot_v_i16mf2_tu( @@ -84,7 +84,7 @@ vint16mf4_t test_vnot_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vnot_v_i16mf2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i16mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m1_tu( @@ -93,7 +93,7 @@ vint16mf2_t test_vnot_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnot_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return vnot_v_i16m1_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i16m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m2_tu( @@ -102,7 +102,7 @@ vint16m1_t test_vnot_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vnot_v_i16m2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i16m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m4_tu( @@ -111,7 +111,7 @@ vint16m2_t test_vnot_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vnot_v_i16m4_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i16m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m8_tu( @@ -120,7 +120,7 @@ vint16m4_t test_vnot_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vnot_v_i16m8_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i16m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tu( @@ -129,7 +129,7 @@ vint16m8_t test_vnot_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t vl) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32mf2_t test_vnot_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return vnot_v_i32mf2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m1_tu( @@ -138,7 +138,7 @@ vint32mf2_t test_vnot_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vnot_v_i32m1_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i32m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m2_tu( @@ -147,7 +147,7 @@ vint32m1_t test_vnot_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnot_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vnot_v_i32m2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m4_tu( @@ -156,7 +156,7 @@ vint32m2_t test_vnot_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vnot_v_i32m4_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m8_tu( @@ -165,7 +165,7 @@ vint32m4_t test_vnot_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return vnot_v_i32m8_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m1_tu( @@ -174,7 +174,7 @@ vint32m8_t test_vnot_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnot_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vnot_v_i64m1_tu(maskedoff, op1, vl); + return 
__riscv_vnot_v_i64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m2_tu( @@ -183,7 +183,7 @@ vint64m1_t test_vnot_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vnot_v_i64m2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m4_tu( @@ -192,7 +192,7 @@ vint64m2_t test_vnot_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vnot_v_i64m4_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m8_tu( @@ -201,7 +201,7 @@ vint64m4_t test_vnot_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return vnot_v_i64m8_tu(maskedoff, op1, vl); + return __riscv_vnot_v_i64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf8_tu( @@ -210,7 +210,7 @@ vint64m8_t test_vnot_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnot_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vnot_v_u8mf8_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u8mf8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf4_tu( @@ -219,7 +219,7 @@ vuint8mf8_t test_vnot_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnot_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vnot_v_u8mf4_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u8mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf2_tu( @@ -228,7 +228,7 @@ vuint8mf4_t test_vnot_v_u8mf4_tu(vuint8mf4_t 
maskedoff, vuint8mf4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnot_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vnot_v_u8mf2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u8mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m1_tu( @@ -237,7 +237,7 @@ vuint8mf2_t test_vnot_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return vnot_v_u8m1_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u8m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m2_tu( @@ -246,7 +246,7 @@ vuint8m1_t test_vnot_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return vnot_v_u8m2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u8m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m4_tu( @@ -255,7 +255,7 @@ vuint8m2_t test_vnot_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnot_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return vnot_v_u8m4_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u8m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m8_tu( @@ -264,7 +264,7 @@ vuint8m4_t test_vnot_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return vnot_v_u8m8_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u8m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf4_tu( @@ -273,7 +273,7 @@ vuint8m8_t test_vnot_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnot_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return 
vnot_v_u16mf4_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u16mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf2_tu( @@ -282,7 +282,7 @@ vuint16mf4_t test_vnot_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vnot_v_u16mf2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u16mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m1_tu( @@ -291,7 +291,7 @@ vuint16mf2_t test_vnot_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return vnot_v_u16m1_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u16m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m2_tu( @@ -300,7 +300,7 @@ vuint16m1_t test_vnot_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnot_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return vnot_v_u16m2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u16m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m4_tu( @@ -309,7 +309,7 @@ vuint16m2_t test_vnot_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnot_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return vnot_v_u16m4_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u16m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m8_tu( @@ -318,7 +318,7 @@ vuint16m4_t test_vnot_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return vnot_v_u16m8_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u16m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tu( @@ -327,7 
+327,7 @@ vuint16m8_t test_vnot_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return vnot_v_u32mf2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m1_tu( @@ -336,7 +336,7 @@ vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return vnot_v_u32m1_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u32m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m2_tu( @@ -345,7 +345,7 @@ vuint32m1_t test_vnot_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return vnot_v_u32m2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m4_tu( @@ -354,7 +354,7 @@ vuint32m2_t test_vnot_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnot_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return vnot_v_u32m4_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m8_tu( @@ -363,7 +363,7 @@ vuint32m4_t test_vnot_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return vnot_v_u32m8_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m1_tu( @@ -372,7 +372,7 @@ vuint32m8_t test_vnot_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vnot_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return vnot_v_u64m1_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m2_tu( @@ -381,7 +381,7 @@ vuint64m1_t test_vnot_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnot_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return vnot_v_u64m2_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m4_tu( @@ -390,7 +390,7 @@ vuint64m2_t test_vnot_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return vnot_v_u64m4_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m4_t test_vnot_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnot_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return vnot_v_u64m8_tu(maskedoff, op1, vl); + return __riscv_vnot_v_u64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t test_vnot_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnot_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vnot_v_i8mf8_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8mf8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf4_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vnot_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnot_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vnot_v_i8mf4_tum(mask, 
maskedoff, op1, vl); + return __riscv_vnot_v_i8mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf2_tum( @@ -426,7 +426,7 @@ vint8mf4_t test_vnot_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnot_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vnot_v_i8mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m1_tum( @@ -435,7 +435,7 @@ vint8mf2_t test_vnot_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnot_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vnot_v_i8m1_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m2_tum( @@ -444,7 +444,7 @@ vint8m1_t test_vnot_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnot_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vnot_v_i8m2_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m4_tum( @@ -453,7 +453,7 @@ vint8m2_t test_vnot_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnot_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vnot_v_i8m4_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m8_tum( @@ -462,7 +462,7 @@ vint8m4_t test_vnot_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return vnot_v_i8m8_tum(mask, maskedoff, op1, vl); + return 
__riscv_vnot_v_i8m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf4_tum( @@ -471,7 +471,7 @@ vint8m8_t test_vnot_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vnot_v_i16mf4_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf2_tum( @@ -480,7 +480,7 @@ vint16mf4_t test_vnot_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vnot_v_i16mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m1_tum( @@ -489,7 +489,7 @@ vint16mf2_t test_vnot_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnot_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return vnot_v_i16m1_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m2_tum( @@ -498,7 +498,7 @@ vint16m1_t test_vnot_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vnot_v_i16m2_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m4_tum( @@ -507,7 +507,7 @@ vint16m2_t test_vnot_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vnot_v_i16m4_tum(mask, maskedoff, op1, vl); + 
return __riscv_vnot_v_i16m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m8_tum( @@ -516,7 +516,7 @@ vint16m4_t test_vnot_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vnot_v_i16m8_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tum( @@ -525,7 +525,7 @@ vint16m8_t test_vnot_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnot_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return vnot_v_i32mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m1_tum( @@ -534,7 +534,7 @@ vint32mf2_t test_vnot_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vnot_v_i32m1_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m2_tum( @@ -543,7 +543,7 @@ vint32m1_t test_vnot_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnot_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vnot_v_i32m2_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m4_tum( @@ -552,7 +552,7 @@ vint32m2_t test_vnot_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vnot_v_i32m4_tum(mask, maskedoff, op1, vl); + 
return __riscv_vnot_v_i32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m8_tum( @@ -561,7 +561,7 @@ vint32m4_t test_vnot_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return vnot_v_i32m8_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m1_tum( @@ -570,7 +570,7 @@ vint32m8_t test_vnot_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnot_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vnot_v_i64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m2_tum( @@ -579,7 +579,7 @@ vint64m1_t test_vnot_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vnot_v_i64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m4_tum( @@ -588,7 +588,7 @@ vint64m2_t test_vnot_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vnot_v_i64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m8_tum( @@ -597,7 +597,7 @@ vint64m4_t test_vnot_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return vnot_v_i64m8_tum(mask, maskedoff, op1, vl); + return 
__riscv_vnot_v_i64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf8_tum( @@ -606,7 +606,7 @@ vint64m8_t test_vnot_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnot_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vnot_v_u8mf8_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8mf8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf4_tum( @@ -615,7 +615,7 @@ vuint8mf8_t test_vnot_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnot_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vnot_v_u8mf4_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf2_tum( @@ -624,7 +624,7 @@ vuint8mf4_t test_vnot_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnot_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vnot_v_u8mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m1_tum( @@ -633,7 +633,7 @@ vuint8mf2_t test_vnot_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return vnot_v_u8m1_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m2_tum( @@ -642,7 +642,7 @@ vuint8m1_t test_vnot_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return vnot_v_u8m2_tum(mask, maskedoff, op1, vl); + return 
__riscv_vnot_v_u8m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m4_tum( @@ -651,7 +651,7 @@ vuint8m2_t test_vnot_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnot_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return vnot_v_u8m4_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m8_tum( @@ -660,7 +660,7 @@ vuint8m4_t test_vnot_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return vnot_v_u8m8_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf4_tum( @@ -669,7 +669,7 @@ vuint8m8_t test_vnot_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnot_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vnot_v_u16mf4_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf2_tum( @@ -678,7 +678,7 @@ vuint16mf4_t test_vnot_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vnot_v_u16mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m1_tum( @@ -687,7 +687,7 @@ vuint16mf2_t test_vnot_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return vnot_v_u16m1_tum(mask, maskedoff, op1, vl); + return 
__riscv_vnot_v_u16m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m2_tum( @@ -696,7 +696,7 @@ vuint16m1_t test_vnot_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnot_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return vnot_v_u16m2_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m4_tum( @@ -705,7 +705,7 @@ vuint16m2_t test_vnot_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnot_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return vnot_v_u16m4_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m8_tum( @@ -714,7 +714,7 @@ vuint16m4_t test_vnot_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return vnot_v_u16m8_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tum( @@ -723,7 +723,7 @@ vuint16m8_t test_vnot_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return vnot_v_u32mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m1_tum( @@ -732,7 +732,7 @@ vuint32mf2_t test_vnot_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return vnot_v_u32m1_tum(mask, maskedoff, op1, vl); 
+ return __riscv_vnot_v_u32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m2_tum( @@ -741,7 +741,7 @@ vuint32m1_t test_vnot_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return vnot_v_u32m2_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m4_tum( @@ -750,7 +750,7 @@ vuint32m2_t test_vnot_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnot_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return vnot_v_u32m4_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m8_tum( @@ -759,7 +759,7 @@ vuint32m4_t test_vnot_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return vnot_v_u32m8_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m1_tum( @@ -768,7 +768,7 @@ vuint32m8_t test_vnot_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnot_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return vnot_v_u64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m2_tum( @@ -777,7 +777,7 @@ vuint64m1_t test_vnot_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnot_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return vnot_v_u64m2_tum(mask, maskedoff, op1, 
vl); + return __riscv_vnot_v_u64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m4_tum( @@ -786,7 +786,7 @@ vuint64m2_t test_vnot_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return vnot_v_u64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m4_t test_vnot_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnot_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return vnot_v_u64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vnot_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnot_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vnot_v_i8mf8_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8mf8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf4_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vnot_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnot_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vnot_v_i8mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf2_tumu( @@ -822,7 +822,7 @@ vint8mf4_t test_vnot_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnot_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vnot_v_i8mf2_tumu(mask, 
maskedoff, op1, vl); + return __riscv_vnot_v_i8mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m1_tumu( @@ -831,7 +831,7 @@ vint8mf2_t test_vnot_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnot_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vnot_v_i8m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m2_tumu( @@ -840,7 +840,7 @@ vint8m1_t test_vnot_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnot_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vnot_v_i8m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m4_tumu( @@ -849,7 +849,7 @@ vint8m2_t test_vnot_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnot_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vnot_v_i8m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m8_tumu( @@ -858,7 +858,7 @@ vint8m4_t test_vnot_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return vnot_v_i8m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf4_tumu( @@ -867,7 +867,7 @@ vint8m8_t test_vnot_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vnot_v_i16mf4_tumu(mask, maskedoff, op1, 
vl); + return __riscv_vnot_v_i16mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf2_tumu( @@ -876,7 +876,7 @@ vint16mf4_t test_vnot_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vnot_v_i16mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m1_tumu( @@ -885,7 +885,7 @@ vint16mf2_t test_vnot_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnot_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return vnot_v_i16m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m2_tumu( @@ -894,7 +894,7 @@ vint16m1_t test_vnot_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vnot_v_i16m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m4_tumu( @@ -903,7 +903,7 @@ vint16m2_t test_vnot_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vnot_v_i16m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m8_tumu( @@ -912,7 +912,7 @@ vint16m4_t test_vnot_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vnot_v_i16m8_tumu(mask, 
maskedoff, op1, vl); + return __riscv_vnot_v_i16m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tumu( @@ -921,7 +921,7 @@ vint16m8_t test_vnot_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return vnot_v_i32mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m1_tumu( @@ -930,7 +930,7 @@ vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vnot_v_i32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m2_tumu( @@ -939,7 +939,7 @@ vint32m1_t test_vnot_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnot_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vnot_v_i32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m4_tumu( @@ -948,7 +948,7 @@ vint32m2_t test_vnot_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vnot_v_i32m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m8_tumu( @@ -957,7 +957,7 @@ vint32m4_t test_vnot_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return 
vnot_v_i32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m1_tumu( @@ -966,7 +966,7 @@ vint32m8_t test_vnot_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnot_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vnot_v_i64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m2_tumu( @@ -975,7 +975,7 @@ vint64m1_t test_vnot_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vnot_v_i64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m4_tumu( @@ -984,7 +984,7 @@ vint64m2_t test_vnot_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vnot_v_i64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m8_tumu( @@ -993,7 +993,7 @@ vint64m4_t test_vnot_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return vnot_v_i64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf8_tumu( @@ -1002,7 +1002,7 @@ vint64m8_t test_vnot_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnot_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t 
vl) { - return vnot_v_u8mf8_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8mf8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf4_tumu( @@ -1011,7 +1011,7 @@ vuint8mf8_t test_vnot_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnot_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vnot_v_u8mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf2_tumu( @@ -1020,7 +1020,7 @@ vuint8mf4_t test_vnot_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnot_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vnot_v_u8mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m1_tumu( @@ -1029,7 +1029,7 @@ vuint8mf2_t test_vnot_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return vnot_v_u8m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m2_tumu( @@ -1038,7 +1038,7 @@ vuint8m1_t test_vnot_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return vnot_v_u8m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m4_tumu( @@ -1047,7 +1047,7 @@ vuint8m2_t test_vnot_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnot_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, 
vuint8m4_t op1, size_t vl) { - return vnot_v_u8m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m8_tumu( @@ -1056,7 +1056,7 @@ vuint8m4_t test_vnot_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return vnot_v_u8m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf4_tumu( @@ -1065,7 +1065,7 @@ vuint8m8_t test_vnot_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnot_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vnot_v_u16mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf2_tumu( @@ -1074,7 +1074,7 @@ vuint16mf4_t test_vnot_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vnot_v_u16mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m1_tumu( @@ -1083,7 +1083,7 @@ vuint16mf2_t test_vnot_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return vnot_v_u16m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m2_tumu( @@ -1092,7 +1092,7 @@ vuint16m1_t test_vnot_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vnot_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return vnot_v_u16m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m4_tumu( @@ -1101,7 +1101,7 @@ vuint16m2_t test_vnot_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnot_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return vnot_v_u16m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m8_tumu( @@ -1110,7 +1110,7 @@ vuint16m4_t test_vnot_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return vnot_v_u16m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tumu( @@ -1119,7 +1119,7 @@ vuint16m8_t test_vnot_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return vnot_v_u32mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m1_tumu( @@ -1128,7 +1128,7 @@ vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return vnot_v_u32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m2_tumu( @@ -1137,7 +1137,7 @@ vuint32m1_t test_vnot_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return vnot_v_u32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m4_tumu( @@ -1146,7 +1146,7 @@ vuint32m2_t test_vnot_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnot_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return vnot_v_u32m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m8_tumu( @@ -1155,7 +1155,7 @@ vuint32m4_t test_vnot_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return vnot_v_u32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m1_tumu( @@ -1164,7 +1164,7 @@ vuint32m8_t test_vnot_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnot_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return vnot_v_u64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m2_tumu( @@ -1173,7 +1173,7 @@ vuint64m1_t test_vnot_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnot_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return vnot_v_u64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m4_tumu( @@ -1182,7 +1182,7 @@ vuint64m2_t test_vnot_v_u64m2_tumu(vbool32_t mask, 
vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return vnot_v_u64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m4_t test_vnot_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnot_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return vnot_v_u64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t test_vnot_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnot_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vnot_v_i8mf8_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8mf8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf4_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vnot_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnot_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vnot_v_i8mf4_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8mf2_mu( @@ -1218,7 +1218,7 @@ vint8mf4_t test_vnot_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnot_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vnot_v_i8mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m1_mu( @@ -1227,7 +1227,7 @@ vint8mf2_t test_vnot_v_i8mf2_mu(vbool16_t mask, 
vint8mf2_t maskedoff, vint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnot_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vnot_v_i8m1_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m2_mu( @@ -1236,7 +1236,7 @@ vint8m1_t test_vnot_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnot_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vnot_v_i8m2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m4_mu( @@ -1245,7 +1245,7 @@ vint8m2_t test_vnot_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnot_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return vnot_v_i8m4_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i8m8_mu( @@ -1254,7 +1254,7 @@ vint8m4_t test_vnot_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return vnot_v_i8m8_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i8m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf4_mu( @@ -1263,7 +1263,7 @@ vint8m8_t test_vnot_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return vnot_v_i16mf4_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16mf2_mu( @@ -1272,7 +1272,7 @@ vint16mf4_t test_vnot_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16m // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vnot_v_i16mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m1_mu( @@ -1281,7 +1281,7 @@ vint16mf2_t test_vnot_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnot_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return vnot_v_i16m1_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m2_mu( @@ -1290,7 +1290,7 @@ vint16m1_t test_vnot_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return vnot_v_i16m2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m4_mu( @@ -1299,7 +1299,7 @@ vint16m2_t test_vnot_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return vnot_v_i16m4_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i16m8_mu( @@ -1308,7 +1308,7 @@ vint16m4_t test_vnot_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return vnot_v_i16m8_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i16m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32mf2_mu( @@ -1317,7 +1317,7 @@ vint16m8_t test_vnot_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32mf2_t test_vnot_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return vnot_v_i32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m1_mu( @@ -1326,7 +1326,7 @@ vint32mf2_t test_vnot_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return vnot_v_i32m1_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m2_mu( @@ -1335,7 +1335,7 @@ vint32m1_t test_vnot_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnot_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return vnot_v_i32m2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m4_mu( @@ -1344,7 +1344,7 @@ vint32m2_t test_vnot_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return vnot_v_i32m4_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i32m8_mu( @@ -1353,7 +1353,7 @@ vint32m4_t test_vnot_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return vnot_v_i32m8_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m1_mu( @@ -1362,7 +1362,7 @@ vint32m8_t test_vnot_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m1_t test_vnot_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return vnot_v_i64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m2_mu( @@ -1371,7 +1371,7 @@ vint64m1_t test_vnot_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return vnot_v_i64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m4_mu( @@ -1380,7 +1380,7 @@ vint64m2_t test_vnot_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return vnot_v_i64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_i64m8_mu( @@ -1389,7 +1389,7 @@ vint64m4_t test_vnot_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return vnot_v_i64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_i64m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf8_mu( @@ -1398,7 +1398,7 @@ vint64m8_t test_vnot_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnot_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vnot_v_u8mf8_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8mf8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf4_mu( @@ -1407,7 +1407,7 @@ vuint8mf8_t test_vnot_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vnot_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vnot_v_u8mf4_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8mf2_mu( @@ -1416,7 +1416,7 @@ vuint8mf4_t test_vnot_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnot_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vnot_v_u8mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m1_mu( @@ -1425,7 +1425,7 @@ vuint8mf2_t test_vnot_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return vnot_v_u8m1_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m2_mu( @@ -1434,7 +1434,7 @@ vuint8m1_t test_vnot_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return vnot_v_u8m2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m4_mu( @@ -1443,7 +1443,7 @@ vuint8m2_t test_vnot_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnot_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return vnot_v_u8m4_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u8m8_mu( @@ -1452,7 +1452,7 @@ vuint8m4_t test_vnot_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8_mu(vbool1_t 
mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return vnot_v_u8m8_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u8m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf4_mu( @@ -1461,7 +1461,7 @@ vuint8m8_t test_vnot_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnot_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vnot_v_u16mf4_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16mf2_mu( @@ -1470,7 +1470,7 @@ vuint16mf4_t test_vnot_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vnot_v_u16mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m1_mu( @@ -1479,7 +1479,7 @@ vuint16mf2_t test_vnot_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return vnot_v_u16m1_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m2_mu( @@ -1488,7 +1488,7 @@ vuint16m1_t test_vnot_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnot_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return vnot_v_u16m2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m4_mu( @@ -1497,7 +1497,7 @@ vuint16m2_t test_vnot_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vnot_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return vnot_v_u16m4_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u16m8_mu( @@ -1506,7 +1506,7 @@ vuint16m4_t test_vnot_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return vnot_v_u16m8_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u16m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32mf2_mu( @@ -1515,7 +1515,7 @@ vuint16m8_t test_vnot_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return vnot_v_u32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m1_mu( @@ -1524,7 +1524,7 @@ vuint32mf2_t test_vnot_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return vnot_v_u32m1_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m2_mu( @@ -1533,7 +1533,7 @@ vuint32m1_t test_vnot_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return vnot_v_u32m2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m4_mu( @@ -1542,7 +1542,7 @@ vuint32m2_t test_vnot_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vnot_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return vnot_v_u32m4_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u32m8_mu( @@ -1551,7 +1551,7 @@ vuint32m4_t test_vnot_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return vnot_v_u32m8_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m1_mu( @@ -1560,7 +1560,7 @@ vuint32m8_t test_vnot_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnot_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return vnot_v_u64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m2_mu( @@ -1569,7 +1569,7 @@ vuint64m1_t test_vnot_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnot_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return vnot_v_u64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m4_mu( @@ -1578,7 +1578,7 @@ vuint64m2_t test_vnot_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return vnot_v_u64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vnot_v_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m4_t test_vnot_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vnot_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return vnot_v_u64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vnot_v_u64m8_mu(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnsra.c index 095ca6b5947c..1fedc3a54f69 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnsra.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsra_wv_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vnsra_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vnsra_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsra_wv_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vnsra_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return 
vnsra_wx_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vnsra_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsra_wv_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vnsra_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vnsra_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsra_wv_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vnsra_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vnsra_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return 
vnsra_wv_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vnsra_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vnsra_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsra_wv_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vnsra_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vnsra_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shif // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsra_wv_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_tu( @@ -129,7 +129,7 @@ vint16mf4_t test_vnsra_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { 
- return vnsra_wx_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_tu( @@ -138,7 +138,7 @@ vint16mf4_t test_vnsra_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsra_wv_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_tu( @@ -147,7 +147,7 @@ vint16mf2_t test_vnsra_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_tu( @@ -156,7 +156,7 @@ vint16mf2_t test_vnsra_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsra_wv_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_tu( @@ -165,7 +165,7 @@ vint16m1_t test_vnsra_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_tu( @@ -174,7 +174,7 @@ vint16m1_t test_vnsra_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2_tu(vint16m2_t maskedoff, 
vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsra_wv_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_tu( @@ -183,7 +183,7 @@ vint16m2_t test_vnsra_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_tu( @@ -192,7 +192,7 @@ vint16m2_t test_vnsra_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsra_wv_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_tu( @@ -201,7 +201,7 @@ vint16m4_t test_vnsra_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tu( @@ -210,7 +210,7 @@ vint16m4_t test_vnsra_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsra_wv_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tu( @@ -219,7 +219,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vnsra_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_tu( @@ -228,7 +228,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsra_wv_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_tu( @@ -237,7 +237,7 @@ vint32m1_t test_vnsra_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_tu( @@ -246,7 +246,7 @@ vint32m1_t test_vnsra_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsra_wv_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_tu( @@ -255,7 +255,7 @@ vint32m2_t test_vnsra_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_tu( @@ -264,7 +264,7 @@ vint32m2_t test_vnsra_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t s // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsra_wv_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_tu( @@ -273,7 +273,7 @@ vint32m4_t test_vnsra_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_tum( @@ -282,7 +282,7 @@ vint32m4_t test_vnsra_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsra_wv_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_tum( @@ -291,7 +291,7 @@ vint8mf8_t test_vnsra_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_tum( @@ -300,7 +300,7 @@ vint8mf8_t test_vnsra_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsra_wv_i8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vnsra_wx_i8mf4_tum( @@ -309,7 +309,7 @@ vint8mf4_t test_vnsra_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_tum( @@ -318,7 +318,7 @@ vint8mf4_t test_vnsra_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsra_wv_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_tum( @@ -327,7 +327,7 @@ vint8mf2_t test_vnsra_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_tum( @@ -336,7 +336,7 @@ vint8mf2_t test_vnsra_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsra_wv_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_tum( @@ -345,7 +345,7 @@ vint8m1_t test_vnsra_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, 
size_t vl) { - return vnsra_wx_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_tum( @@ -354,7 +354,7 @@ vint8m1_t test_vnsra_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsra_wv_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_tum( @@ -363,7 +363,7 @@ vint8m2_t test_vnsra_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_tum( @@ -372,7 +372,7 @@ vint8m2_t test_vnsra_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsra_wv_i8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_tum( @@ -381,7 +381,7 @@ vint8m4_t test_vnsra_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_tum( @@ -390,7 +390,7 @@ vint8m4_t test_vnsra_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, 
vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsra_wv_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_tum( @@ -399,7 +399,7 @@ vint16mf4_t test_vnsra_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_tum( @@ -408,7 +408,7 @@ vint16mf4_t test_vnsra_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsra_wv_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_tum( @@ -417,7 +417,7 @@ vint16mf2_t test_vnsra_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_tum( @@ -426,7 +426,7 @@ vint16mf2_t test_vnsra_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsra_wv_i16m1_tum(mask, maskedoff, op1, shift, 
vl); + return __riscv_vnsra_wv_i16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_tum( @@ -435,7 +435,7 @@ vint16m1_t test_vnsra_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_tum( @@ -444,7 +444,7 @@ vint16m1_t test_vnsra_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsra_wv_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_tum( @@ -453,7 +453,7 @@ vint16m2_t test_vnsra_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_tum( @@ -462,7 +462,7 @@ vint16m2_t test_vnsra_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsra_wv_i16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_tum( @@ -471,7 +471,7 @@ vint16m4_t test_vnsra_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m4_t test_vnsra_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tum( @@ -480,7 +480,7 @@ vint16m4_t test_vnsra_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsra_wv_i32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tum( @@ -489,7 +489,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_tum( @@ -498,7 +498,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsra_wv_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_tum( @@ -507,7 +507,7 @@ vint32m1_t test_vnsra_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32m1_tum(mask, 
maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_tum( @@ -516,7 +516,7 @@ vint32m1_t test_vnsra_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsra_wv_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_tum( @@ -525,7 +525,7 @@ vint32m2_t test_vnsra_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_tum( @@ -534,7 +534,7 @@ vint32m2_t test_vnsra_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsra_wv_i32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_tum( @@ -543,7 +543,7 @@ vint32m4_t test_vnsra_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_tumu( @@ -552,7 +552,7 @@ vint32m4_t test_vnsra_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vnsra_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsra_wv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_tumu( @@ -561,7 +561,7 @@ vint8mf8_t test_vnsra_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_tumu( @@ -570,7 +570,7 @@ vint8mf8_t test_vnsra_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsra_wv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_tumu( @@ -579,7 +579,7 @@ vint8mf4_t test_vnsra_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_tumu( @@ -588,7 +588,7 @@ vint8mf4_t test_vnsra_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsra_wv_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8mf2_tumu(mask, maskedoff, op1, 
shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_tumu( @@ -597,7 +597,7 @@ vint8mf2_t test_vnsra_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_tumu( @@ -606,7 +606,7 @@ vint8mf2_t test_vnsra_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsra_wv_i8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_tumu( @@ -615,7 +615,7 @@ vint8m1_t test_vnsra_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_tumu( @@ -624,7 +624,7 @@ vint8m1_t test_vnsra_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsra_wv_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_tumu( @@ -633,7 +633,7 @@ vint8m2_t test_vnsra_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2_tumu(vbool4_t mask, vint8m2_t 
maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_tumu( @@ -642,7 +642,7 @@ vint8m2_t test_vnsra_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsra_wv_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_tumu( @@ -651,7 +651,7 @@ vint8m4_t test_vnsra_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_tumu( @@ -660,7 +660,7 @@ vint8m4_t test_vnsra_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsra_wv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_tumu( @@ -669,7 +669,7 @@ vint16mf4_t test_vnsra_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_tumu( 
@@ -678,7 +678,7 @@ vint16mf4_t test_vnsra_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsra_wv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_tumu( @@ -687,7 +687,7 @@ vint16mf2_t test_vnsra_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_tumu( @@ -696,7 +696,7 @@ vint16mf2_t test_vnsra_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsra_wv_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_tumu( @@ -705,7 +705,7 @@ vint16m1_t test_vnsra_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_tumu( @@ -714,7 +714,7 @@ vint16m1_t test_vnsra_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, 
vuint16m2_t shift, size_t vl) { - return vnsra_wv_i16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_tumu( @@ -723,7 +723,7 @@ vint16m2_t test_vnsra_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_tumu( @@ -732,7 +732,7 @@ vint16m2_t test_vnsra_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsra_wv_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_tumu( @@ -741,7 +741,7 @@ vint16m4_t test_vnsra_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_tumu( @@ -750,7 +750,7 @@ vint16m4_t test_vnsra_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsra_wv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_tumu( @@ -759,7 +759,7 
@@ vint32mf2_t test_vnsra_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_tumu( @@ -768,7 +768,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsra_wv_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_tumu( @@ -777,7 +777,7 @@ vint32m1_t test_vnsra_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_tumu( @@ -786,7 +786,7 @@ vint32m1_t test_vnsra_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsra_wv_i32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_tumu( @@ -795,7 +795,7 @@ vint32m2_t test_vnsra_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t 
vl) { - return vnsra_wx_i32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_tumu( @@ -804,7 +804,7 @@ vint32m2_t test_vnsra_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsra_wv_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_tumu( @@ -813,7 +813,7 @@ vint32m4_t test_vnsra_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_mu( @@ -822,7 +822,7 @@ vint32m4_t test_vnsra_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsra_wv_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_mu( @@ -831,7 +831,7 @@ vint8mf8_t test_vnsra_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_mu( @@ -840,7 +840,7 @@ vint8mf8_t test_vnsra_wx_i8mf8_mu(vbool64_t 
mask, vint8mf8_t maskedoff, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsra_wv_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_mu( @@ -849,7 +849,7 @@ vint8mf4_t test_vnsra_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_mu( @@ -858,7 +858,7 @@ vint8mf4_t test_vnsra_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsra_wv_i8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_mu( @@ -867,7 +867,7 @@ vint8mf2_t test_vnsra_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_mu( @@ -876,7 +876,7 @@ vint8mf2_t test_vnsra_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsra_wv_i8m1_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnsra_wv_i8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_mu( @@ -885,7 +885,7 @@ vint8m1_t test_vnsra_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_mu( @@ -894,7 +894,7 @@ vint8m1_t test_vnsra_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsra_wv_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_mu( @@ -903,7 +903,7 @@ vint8m2_t test_vnsra_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_mu( @@ -912,7 +912,7 @@ vint8m2_t test_vnsra_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsra_wv_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_mu( @@ -921,7 +921,7 @@ vint8m4_t test_vnsra_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4_mu(vbool2_t mask, vint8m4_t 
maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_mu( @@ -930,7 +930,7 @@ vint8m4_t test_vnsra_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsra_wv_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_mu( @@ -939,7 +939,7 @@ vint16mf4_t test_vnsra_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_mu( @@ -948,7 +948,7 @@ vint16mf4_t test_vnsra_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsra_wv_i16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_mu( @@ -957,7 +957,7 @@ vint16mf2_t test_vnsra_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_mu( @@ -966,7 
+966,7 @@ vint16mf2_t test_vnsra_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsra_wv_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_mu( @@ -975,7 +975,7 @@ vint16m1_t test_vnsra_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_mu( @@ -984,7 +984,7 @@ vint16m1_t test_vnsra_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsra_wv_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_mu( @@ -993,7 +993,7 @@ vint16m2_t test_vnsra_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_mu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vnsra_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return 
vnsra_wv_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_mu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vnsra_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_mu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vnsra_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsra_wv_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_mu( @@ -1029,7 +1029,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_mu( @@ -1038,7 +1038,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsra_wv_i32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_mu( @@ -1047,7 +1047,7 @@ vint32m1_t test_vnsra_wv_i32m1_mu(vbool32_t mask, 
vint32m1_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_mu( @@ -1056,7 +1056,7 @@ vint32m1_t test_vnsra_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsra_wv_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_mu( @@ -1065,7 +1065,7 @@ vint32m2_t test_vnsra_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wx_i32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_mu( @@ -1074,7 +1074,7 @@ vint32m2_t test_vnsra_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsra_wv_i32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsra_wv_i32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_mu( @@ -1083,6 +1083,6 @@ vint32m4_t test_vnsra_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vnsra_wx_i32m4_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnsra_wx_i32m4_mu(mask, maskedoff, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnsrl.c index b0731068238c..c396f8f89790 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vnsrl.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsrl_wv_u8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_tu( @@ -30,7 +30,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsrl_wv_u8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_tu( @@ -39,7 +39,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vnsrl_wv_u8mf2_tu( @@ -48,7 +48,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsrl_wv_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_tu( @@ -57,7 +57,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_tu( @@ -66,7 +66,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsrl_wv_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vnsrl_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_tu( @@ -84,7 +84,7 @@ vuint8m1_t test_vnsrl_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsrl_wv_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8m2_tu(maskedoff, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_tu( @@ -93,7 +93,7 @@ vuint8m2_t test_vnsrl_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_tu( @@ -102,7 +102,7 @@ vuint8m2_t test_vnsrl_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsrl_wv_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_tu( @@ -111,7 +111,7 @@ vuint8m4_t test_vnsrl_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vnsrl_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsrl_wv_u16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_tu( @@ -129,7 +129,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16mf4_tu(maskedoff, op1, shift, vl); + return 
__riscv_vnsrl_wx_u16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_tu( @@ -138,7 +138,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsrl_wv_u16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_tu( @@ -147,7 +147,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_tu( @@ -156,7 +156,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsrl_wv_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_tu( @@ -165,7 +165,7 @@ vuint16m1_t test_vnsrl_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_tu( @@ -174,7 +174,7 @@ vuint16m1_t test_vnsrl_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return 
vnsrl_wv_u16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_tu( @@ -183,7 +183,7 @@ vuint16m2_t test_vnsrl_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_tu( @@ -192,7 +192,7 @@ vuint16m2_t test_vnsrl_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsrl_wv_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_tu( @@ -201,7 +201,7 @@ vuint16m4_t test_vnsrl_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tu( @@ -210,7 +210,7 @@ vuint16m4_t test_vnsrl_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsrl_wv_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tu( @@ -219,7 +219,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t maskedoff, 
vuint64m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_tu( @@ -228,7 +228,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsrl_wv_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_tu( @@ -237,7 +237,7 @@ vuint32m1_t test_vnsrl_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_tu( @@ -246,7 +246,7 @@ vuint32m1_t test_vnsrl_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsrl_wv_u32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_tu( @@ -255,7 +255,7 @@ vuint32m2_t test_vnsrl_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_tu( @@ -264,7 +264,7 @@ vuint32m2_t test_vnsrl_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vnsrl_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsrl_wv_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_tu( @@ -273,7 +273,7 @@ vuint32m4_t test_vnsrl_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_tum( @@ -282,7 +282,7 @@ vuint32m4_t test_vnsrl_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsrl_wv_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_tum( @@ -291,7 +291,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_tum( @@ -300,7 +300,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsrl_wv_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_tum( @@ 
-309,7 +309,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_tum( @@ -318,7 +318,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsrl_wv_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_tum( @@ -327,7 +327,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_tum( @@ -336,7 +336,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsrl_wv_u8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_tum( @@ -345,7 +345,7 @@ vuint8m1_t test_vnsrl_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { 
- return vnsrl_wx_u8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_tum( @@ -354,7 +354,7 @@ vuint8m1_t test_vnsrl_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsrl_wv_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_tum( @@ -363,7 +363,7 @@ vuint8m2_t test_vnsrl_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_tum( @@ -372,7 +372,7 @@ vuint8m2_t test_vnsrl_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsrl_wv_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_tum( @@ -381,7 +381,7 @@ vuint8m4_t test_vnsrl_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_tum( @@ -390,7 +390,7 @@ vuint8m4_t test_vnsrl_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, 
vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsrl_wv_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_tum( @@ -399,7 +399,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_tum( @@ -408,7 +408,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsrl_wv_u16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_tum( @@ -417,7 +417,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_tum( @@ -426,7 +426,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsrl_wv_u16m1_tum(mask, maskedoff, 
op1, shift, vl); + return __riscv_vnsrl_wv_u16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_tum( @@ -435,7 +435,7 @@ vuint16m1_t test_vnsrl_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_tum( @@ -444,7 +444,7 @@ vuint16m1_t test_vnsrl_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsrl_wv_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_tum( @@ -453,7 +453,7 @@ vuint16m2_t test_vnsrl_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_tum( @@ -462,7 +462,7 @@ vuint16m2_t test_vnsrl_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsrl_wv_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_tum( @@ -471,7 +471,7 @@ vuint16m4_t test_vnsrl_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint3 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tum( @@ -480,7 +480,7 @@ vuint16m4_t test_vnsrl_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsrl_wv_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tum( @@ -489,7 +489,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_tum( @@ -498,7 +498,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsrl_wv_u32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_tum( @@ -507,7 +507,7 @@ vuint32m1_t test_vnsrl_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m1_tum(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnsrl_wx_u32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_tum( @@ -516,7 +516,7 @@ vuint32m1_t test_vnsrl_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsrl_wv_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_tum( @@ -525,7 +525,7 @@ vuint32m2_t test_vnsrl_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_tum( @@ -534,7 +534,7 @@ vuint32m2_t test_vnsrl_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsrl_wv_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_tum( @@ -543,7 +543,7 @@ vuint32m4_t test_vnsrl_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_tumu( @@ -552,7 +552,7 @@ vuint32m4_t test_vnsrl_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf8_t test_vnsrl_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsrl_wv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_tumu( @@ -561,7 +561,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_tumu( @@ -570,7 +570,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsrl_wv_u8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_tumu( @@ -579,7 +579,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_tumu( @@ -588,7 +588,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsrl_wv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnsrl_wv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_tumu( @@ -597,7 +597,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_tumu( @@ -606,7 +606,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsrl_wv_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_tumu( @@ -615,7 +615,7 @@ vuint8m1_t test_vnsrl_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_tumu( @@ -624,7 +624,7 @@ vuint8m1_t test_vnsrl_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsrl_wv_u8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_tumu( @@ -633,7 +633,7 @@ vuint8m2_t test_vnsrl_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m2_t test_vnsrl_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_tumu( @@ -642,7 +642,7 @@ vuint8m2_t test_vnsrl_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsrl_wv_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_tumu( @@ -651,7 +651,7 @@ vuint8m4_t test_vnsrl_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_tumu( @@ -660,7 +660,7 @@ vuint8m4_t test_vnsrl_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsrl_wv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_tumu( @@ -669,7 +669,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16mf4_tumu(mask, 
maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_tumu( @@ -678,7 +678,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsrl_wv_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_tumu( @@ -687,7 +687,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_tumu( @@ -696,7 +696,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsrl_wv_u16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_tumu( @@ -705,7 +705,7 @@ vuint16m1_t test_vnsrl_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_tumu( @@ -714,7 +714,7 @@ vuint16m1_t test_vnsrl_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint16m2_t test_vnsrl_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsrl_wv_u16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_tumu( @@ -723,7 +723,7 @@ vuint16m2_t test_vnsrl_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_tumu( @@ -732,7 +732,7 @@ vuint16m2_t test_vnsrl_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsrl_wv_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_tumu( @@ -741,7 +741,7 @@ vuint16m4_t test_vnsrl_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_tumu( @@ -750,7 +750,7 @@ vuint16m4_t test_vnsrl_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsrl_wv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnsrl_wv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_tumu( @@ -759,7 +759,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_tumu( @@ -768,7 +768,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsrl_wv_u32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_tumu( @@ -777,7 +777,7 @@ vuint32m1_t test_vnsrl_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_tumu( @@ -786,7 +786,7 @@ vuint32m1_t test_vnsrl_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsrl_wv_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_tumu( @@ -795,7 +795,7 @@ vuint32m2_t test_vnsrl_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_tumu( @@ -804,7 +804,7 @@ vuint32m2_t test_vnsrl_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsrl_wv_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_tumu( @@ -813,7 +813,7 @@ vuint32m4_t test_vnsrl_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_mu( @@ -822,7 +822,7 @@ vuint32m4_t test_vnsrl_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return vnsrl_wv_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_mu( @@ -831,7 +831,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vnsrl_wx_u8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_mu( @@ -840,7 +840,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return vnsrl_wv_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_mu( @@ -849,7 +849,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_mu( @@ -858,7 +858,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return vnsrl_wv_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_mu( @@ -867,7 +867,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_mu( @@ -876,7 +876,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vnsrl_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return vnsrl_wv_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_mu( @@ -885,7 +885,7 @@ vuint8m1_t test_vnsrl_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_mu( @@ -894,7 +894,7 @@ vuint8m1_t test_vnsrl_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return vnsrl_wv_u8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_mu( @@ -903,7 +903,7 @@ vuint8m2_t test_vnsrl_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_mu( @@ -912,7 +912,7 @@ vuint8m2_t test_vnsrl_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return vnsrl_wv_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_mu( @@ 
-921,7 +921,7 @@ vuint8m4_t test_vnsrl_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_mu( @@ -930,7 +930,7 @@ vuint8m4_t test_vnsrl_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return vnsrl_wv_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_mu( @@ -939,7 +939,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_mu( @@ -948,7 +948,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return vnsrl_wv_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_mu( @@ -957,7 +957,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t 
shift, size_t vl) { - return vnsrl_wx_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_mu( @@ -966,7 +966,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return vnsrl_wv_u16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_mu( @@ -975,7 +975,7 @@ vuint16m1_t test_vnsrl_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_mu( @@ -984,7 +984,7 @@ vuint16m1_t test_vnsrl_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return vnsrl_wv_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_mu( @@ -993,7 +993,7 @@ vuint16m2_t test_vnsrl_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_mu( @@ -1002,7 +1002,7 @@ vuint16m2_t 
test_vnsrl_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return vnsrl_wv_u16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_mu( @@ -1011,7 +1011,7 @@ vuint16m4_t test_vnsrl_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_mu( @@ -1020,7 +1020,7 @@ vuint16m4_t test_vnsrl_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsrl_wv_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_mu( @@ -1029,7 +1029,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_mu( @@ -1038,7 +1038,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return 
vnsrl_wv_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_mu( @@ -1047,7 +1047,7 @@ vuint32m1_t test_vnsrl_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_mu( @@ -1056,7 +1056,7 @@ vuint32m1_t test_vnsrl_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return vnsrl_wv_u32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_mu( @@ -1065,7 +1065,7 @@ vuint32m2_t test_vnsrl_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_mu( @@ -1074,7 +1074,7 @@ vuint32m2_t test_vnsrl_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return vnsrl_wv_u32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wv_u32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_mu( @@ -1083,6 +1083,6 @@ vuint32m4_t test_vnsrl_wv_u32m4_mu(vbool8_t mask, 
vuint32m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vnsrl_wx_u32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vnsrl_wx_u32m4_mu(mask, maskedoff, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vor.c index 9b721981afb7..920b8a575300 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vor.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vor_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vor_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vor_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vor_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vor_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t 
op1, int8_t op2, size_t vl) { - return vor_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vor_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vor_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vor_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vor_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vor_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vor_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vor_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vor_vv_i8m2_tu(maskedoff, op1, op2, 
vl); + return __riscv_vor_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vor_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vor_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vor_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vor_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vor_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vor_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vor_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vor_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vor_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vor_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vor_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vor_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vor_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vor_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vor_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vor_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vor_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vor_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vor_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vor_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vor_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vor_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vor_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vor_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m8_tu( @@ -228,7 +228,7 @@ 
vint16m4_t test_vor_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vor_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vor_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vor_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vor_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vor_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vor_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vor_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vor_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t 
test_vor_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vor_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vor_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vor_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vor_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vor_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vor_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vor_vx_i32m4_tu(vint32m4_t maskedoff, 
vint32m4_t op1, int32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vor_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vor_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vor_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vor_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vor_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vor_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vor_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vor_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vor_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vor_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vor_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vor_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vor_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vor_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf8_tu( @@ -408,7 +408,7 @@ vint64m8_t test_vor_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vor_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vor_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf8_tu( @@ -417,7 +417,7 @@ vuint8mf8_t test_vor_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf4_tu( @@ -426,7 +426,7 @@ vuint8mf8_t test_vor_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vor_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf4_tu( @@ -435,7 +435,7 @@ vuint8mf4_t test_vor_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf2_tu( @@ -444,7 +444,7 @@ vuint8mf4_t test_vor_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vor_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf2_tu( @@ -453,7 +453,7 @@ vuint8mf2_t test_vor_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vor_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m1_tu( @@ -462,7 +462,7 @@ vuint8mf2_t test_vor_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vor_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m1_tu( @@ -471,7 +471,7 @@ vuint8m1_t test_vor_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m2_tu( @@ -480,7 +480,7 @@ vuint8m1_t test_vor_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vor_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m2_tu( @@ -489,7 +489,7 @@ vuint8m2_t test_vor_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m4_tu( @@ -498,7 +498,7 @@ vuint8m2_t test_vor_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, 
vuint8m4_t op2, size_t vl) { - return vor_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m4_tu( @@ -507,7 +507,7 @@ vuint8m4_t test_vor_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m8_tu( @@ -516,7 +516,7 @@ vuint8m4_t test_vor_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vor_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m8_tu( @@ -525,7 +525,7 @@ vuint8m8_t test_vor_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf4_tu( @@ -534,7 +534,7 @@ vuint8m8_t test_vor_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vor_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf4_tu( @@ -543,7 +543,7 @@ vuint16mf4_t test_vor_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return 
vor_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf2_tu( @@ -552,7 +552,7 @@ vuint16mf4_t test_vor_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vor_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf2_tu( @@ -561,7 +561,7 @@ vuint16mf2_t test_vor_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m1_tu( @@ -570,7 +570,7 @@ vuint16mf2_t test_vor_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vor_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m1_tu( @@ -579,7 +579,7 @@ vuint16m1_t test_vor_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m2_tu( @@ -588,7 +588,7 @@ vuint16m1_t test_vor_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return 
vor_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m2_tu( @@ -597,7 +597,7 @@ vuint16m2_t test_vor_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m4_tu( @@ -606,7 +606,7 @@ vuint16m2_t test_vor_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vor_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m4_tu( @@ -615,7 +615,7 @@ vuint16m4_t test_vor_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m8_tu( @@ -624,7 +624,7 @@ vuint16m4_t test_vor_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vor_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m8_tu( @@ -633,7 +633,7 @@ vuint16m8_t test_vor_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m8_tu(maskedoff, 
op1, op2, vl); + return __riscv_vor_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tu( @@ -642,7 +642,7 @@ vuint16m8_t test_vor_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vor_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tu( @@ -651,7 +651,7 @@ vuint32mf2_t test_vor_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m1_tu( @@ -660,7 +660,7 @@ vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vor_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m1_tu( @@ -669,7 +669,7 @@ vuint32m1_t test_vor_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m2_tu( @@ -678,7 +678,7 @@ vuint32m1_t test_vor_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vor_vv_u32m2_tu(maskedoff, op1, op2, 
vl); + return __riscv_vor_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m2_tu( @@ -687,7 +687,7 @@ vuint32m2_t test_vor_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m4_tu( @@ -696,7 +696,7 @@ vuint32m2_t test_vor_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vor_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m4_tu( @@ -705,7 +705,7 @@ vuint32m4_t test_vor_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m8_tu( @@ -714,7 +714,7 @@ vuint32m4_t test_vor_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vor_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m8_tu( @@ -723,7 +723,7 @@ vuint32m8_t test_vor_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m8_tu(maskedoff, op1, op2, vl); + return 
__riscv_vor_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m1_tu( @@ -732,7 +732,7 @@ vuint32m8_t test_vor_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vor_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m1_tu( @@ -741,7 +741,7 @@ vuint64m1_t test_vor_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m2_tu( @@ -750,7 +750,7 @@ vuint64m1_t test_vor_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vor_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m2_tu( @@ -759,7 +759,7 @@ vuint64m2_t test_vor_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m4_tu( @@ -768,7 +768,7 @@ vuint64m2_t test_vor_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vor_vv_u64m4_tu(maskedoff, op1, op2, vl); + return 
__riscv_vor_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m4_tu( @@ -777,7 +777,7 @@ vuint64m4_t test_vor_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m8_tu( @@ -786,7 +786,7 @@ vuint64m4_t test_vor_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vor_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m8_tu( @@ -795,7 +795,7 @@ vuint64m8_t test_vor_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf8_tum( @@ -804,7 +804,7 @@ vuint64m8_t test_vor_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vor_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf8_tum( @@ -813,7 +813,7 @@ vint8mf8_t test_vor_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf8_tum(mask, maskedoff, op1, 
op2, vl); + return __riscv_vor_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf4_tum( @@ -822,7 +822,7 @@ vint8mf8_t test_vor_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vor_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf4_tum( @@ -831,7 +831,7 @@ vint8mf4_t test_vor_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf2_tum( @@ -840,7 +840,7 @@ vint8mf4_t test_vor_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vor_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf2_tum( @@ -849,7 +849,7 @@ vint8mf2_t test_vor_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m1_tum( @@ -858,7 +858,7 @@ vint8mf2_t test_vor_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1_tum(vbool8_t mask, 
vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vor_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m1_tum( @@ -867,7 +867,7 @@ vint8m1_t test_vor_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m2_tum( @@ -876,7 +876,7 @@ vint8m1_t test_vor_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vor_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m2_tum( @@ -885,7 +885,7 @@ vint8m2_t test_vor_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m4_tum( @@ -894,7 +894,7 @@ vint8m2_t test_vor_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vor_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m4_tum( @@ -903,7 +903,7 @@ vint8m4_t test_vor_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m8_tum( @@ -912,7 +912,7 @@ vint8m4_t test_vor_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vor_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m8_tum( @@ -921,7 +921,7 @@ vint8m8_t test_vor_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf4_tum( @@ -930,7 +930,7 @@ vint8m8_t test_vor_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vor_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf4_tum( @@ -939,7 +939,7 @@ vint16mf4_t test_vor_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf2_tum( @@ 
-948,7 +948,7 @@ vint16mf4_t test_vor_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vor_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf2_tum( @@ -957,7 +957,7 @@ vint16mf2_t test_vor_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m1_tum( @@ -966,7 +966,7 @@ vint16mf2_t test_vor_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vor_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m1_tum( @@ -975,7 +975,7 @@ vint16m1_t test_vor_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m2_tum( @@ -984,7 +984,7 @@ vint16m1_t test_vor_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vor_vv_i16m2_tum(mask, maskedoff, 
op1, op2, vl); + return __riscv_vor_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m2_tum( @@ -993,7 +993,7 @@ vint16m2_t test_vor_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m4_tum( @@ -1002,7 +1002,7 @@ vint16m2_t test_vor_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vor_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m4_tum( @@ -1011,7 +1011,7 @@ vint16m4_t test_vor_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m8_tum( @@ -1020,7 +1020,7 @@ vint16m4_t test_vor_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vor_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m8_tum( @@ -1029,7 +1029,7 @@ vint16m8_t test_vor_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vor_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tum( @@ -1038,7 +1038,7 @@ vint16m8_t test_vor_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vor_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tum( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vor_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vor_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m1_tum( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vor_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vor_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m1_tum( @@ -1065,7 +1065,7 @@ vint32m1_t test_vor_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m2_tum( @@ -1074,7 
+1074,7 @@ vint32m1_t test_vor_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vor_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m2_tum( @@ -1083,7 +1083,7 @@ vint32m2_t test_vor_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m4_tum( @@ -1092,7 +1092,7 @@ vint32m2_t test_vor_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vor_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m4_tum( @@ -1101,7 +1101,7 @@ vint32m4_t test_vor_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m8_tum( @@ -1110,7 +1110,7 @@ vint32m4_t test_vor_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vor_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + 
return __riscv_vor_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m8_tum( @@ -1119,7 +1119,7 @@ vint32m8_t test_vor_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m1_tum( @@ -1128,7 +1128,7 @@ vint32m8_t test_vor_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vor_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m1_tum( @@ -1137,7 +1137,7 @@ vint64m1_t test_vor_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m2_tum( @@ -1146,7 +1146,7 @@ vint64m1_t test_vor_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vor_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m2_tum( @@ -1155,7 +1155,7 @@ vint64m2_t test_vor_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vx_i64m2_tum(vbool32_t mask, 
vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m4_tum( @@ -1164,7 +1164,7 @@ vint64m2_t test_vor_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vor_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m4_tum( @@ -1173,7 +1173,7 @@ vint64m4_t test_vor_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m8_tum( @@ -1182,7 +1182,7 @@ vint64m4_t test_vor_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vor_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m8_tum( @@ -1191,7 +1191,7 @@ vint64m8_t test_vor_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf8_tum( @@ -1200,7 +1200,7 @@ vint64m8_t test_vor_vx_i64m8_tum(vbool8_t 
mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vor_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf8_tum( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vor_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf4_tum( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vor_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vor_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf4_tum( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vor_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf2_tum( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vor_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vor_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vor_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf2_tum( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vor_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m1_tum( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vor_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vor_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m1_tum( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vor_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m2_tum( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vor_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vor_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m2_tum( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vor_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, 
vuint8m2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m4_tum( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vor_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vor_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m4_tum( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vor_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m8_tum( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vor_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vor_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m8_tum( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vor_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf4_tum( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vor_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vor_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf4_tum( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vor_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf2_tum( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vor_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vor_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf2_tum( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vor_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m1_tum( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vor_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vor_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m1_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m1_tum( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vor_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m2_tum( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vor_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vor_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m2_tum( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vor_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m4_tum( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vor_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vor_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m4_tum( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vor_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, 
vuint16m4_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m8_tum( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vor_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vor_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m8_tum( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vor_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tum( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vor_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vor_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tum( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vor_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m1_tum( @@ -1452,7 +1452,7 @@ vuint32mf2_t 
test_vor_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vor_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m1_tum( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vor_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m2_tum( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vor_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vor_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m2_tum( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vor_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m4_tum( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vor_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vor_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + 
return __riscv_vor_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m4_tum( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vor_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m8_tum( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vor_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vor_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m8_tum( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vor_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m1_tum( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vor_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vor_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m1_tum( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vor_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vor_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m2_tum( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vor_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vor_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m2_tum( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vor_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m4_tum( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vor_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vor_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m4_tum( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vor_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m8_tum( @@ 
-1578,7 +1578,7 @@ vuint64m4_t test_vor_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vor_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m8_tum( @@ -1587,7 +1587,7 @@ vuint64m8_t test_vor_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf8_tumu( @@ -1596,7 +1596,7 @@ vuint64m8_t test_vor_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vor_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf8_tumu( @@ -1605,7 +1605,7 @@ vint8mf8_t test_vor_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf4_tumu( @@ -1614,7 +1614,7 @@ vint8mf8_t test_vor_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vor_vv_i8mf4_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf4_tumu( @@ -1623,7 +1623,7 @@ vint8mf4_t test_vor_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf2_tumu( @@ -1632,7 +1632,7 @@ vint8mf4_t test_vor_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vor_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf2_tumu( @@ -1641,7 +1641,7 @@ vint8mf2_t test_vor_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m1_tumu( @@ -1650,7 +1650,7 @@ vint8mf2_t test_vor_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vor_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m1_tumu( @@ -1659,7 +1659,7 @@ vint8m1_t test_vor_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vor_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m2_tumu( @@ -1668,7 +1668,7 @@ vint8m1_t test_vor_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vor_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m2_tumu( @@ -1677,7 +1677,7 @@ vint8m2_t test_vor_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m4_tumu( @@ -1686,7 +1686,7 @@ vint8m2_t test_vor_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vor_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m4_tumu( @@ -1695,7 +1695,7 @@ vint8m4_t test_vor_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m8_tumu( @@ -1704,7 +1704,7 @@ vint8m4_t 
test_vor_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vor_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m8_tumu( @@ -1713,7 +1713,7 @@ vint8m8_t test_vor_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf4_tumu( @@ -1722,7 +1722,7 @@ vint8m8_t test_vor_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vor_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf4_tumu( @@ -1731,7 +1731,7 @@ vint16mf4_t test_vor_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf2_tumu( @@ -1740,7 +1740,7 @@ vint16mf4_t test_vor_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vor_vv_i16mf2_tumu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vor_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf2_tumu( @@ -1749,7 +1749,7 @@ vint16mf2_t test_vor_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m1_tumu( @@ -1758,7 +1758,7 @@ vint16mf2_t test_vor_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vor_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m1_tumu( @@ -1767,7 +1767,7 @@ vint16m1_t test_vor_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m2_tumu( @@ -1776,7 +1776,7 @@ vint16m1_t test_vor_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vor_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m2_tumu( @@ -1785,7 +1785,7 @@ vint16m2_t test_vor_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vor_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m4_tumu( @@ -1794,7 +1794,7 @@ vint16m2_t test_vor_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vor_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m4_tumu( @@ -1803,7 +1803,7 @@ vint16m4_t test_vor_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m8_tumu( @@ -1812,7 +1812,7 @@ vint16m4_t test_vor_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vor_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m8_tumu( @@ -1821,7 +1821,7 @@ vint16m8_t test_vor_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_tumu( @@ -1830,7 
+1830,7 @@ vint16m8_t test_vor_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vor_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_tumu( @@ -1839,7 +1839,7 @@ vint32mf2_t test_vor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vor_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m1_tumu( @@ -1848,7 +1848,7 @@ vint32mf2_t test_vor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vor_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m1_tumu( @@ -1857,7 +1857,7 @@ vint32m1_t test_vor_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m2_tumu( @@ -1866,7 +1866,7 @@ vint32m1_t test_vor_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return 
vor_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m2_tumu( @@ -1875,7 +1875,7 @@ vint32m2_t test_vor_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m4_tumu( @@ -1884,7 +1884,7 @@ vint32m2_t test_vor_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vor_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m4_tumu( @@ -1893,7 +1893,7 @@ vint32m4_t test_vor_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m8_tumu( @@ -1902,7 +1902,7 @@ vint32m4_t test_vor_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vor_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m8_tumu( @@ -1911,7 +1911,7 @@ vint32m8_t test_vor_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m8_t test_vor_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m1_tumu( @@ -1920,7 +1920,7 @@ vint32m8_t test_vor_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vor_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m1_tumu( @@ -1929,7 +1929,7 @@ vint64m1_t test_vor_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m2_tumu( @@ -1938,7 +1938,7 @@ vint64m1_t test_vor_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vor_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m2_tumu( @@ -1947,7 +1947,7 @@ vint64m2_t test_vor_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vor_vv_i64m4_tumu( @@ -1956,7 +1956,7 @@ vint64m2_t test_vor_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vor_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m4_tumu( @@ -1965,7 +1965,7 @@ vint64m4_t test_vor_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m8_tumu( @@ -1974,7 +1974,7 @@ vint64m4_t test_vor_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vor_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m8_tumu( @@ -1983,7 +1983,7 @@ vint64m8_t test_vor_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf8_tumu( @@ -1992,7 +1992,7 @@ vint64m8_t test_vor_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - 
return vor_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf8_tumu( @@ -2001,7 +2001,7 @@ vuint8mf8_t test_vor_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf4_tumu( @@ -2010,7 +2010,7 @@ vuint8mf8_t test_vor_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vor_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf4_tumu( @@ -2019,7 +2019,7 @@ vuint8mf4_t test_vor_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf2_tumu( @@ -2028,7 +2028,7 @@ vuint8mf4_t test_vor_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vor_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf2_tumu( @@ -2037,7 +2037,7 @@ vuint8mf2_t test_vor_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, 
vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m1_tumu( @@ -2046,7 +2046,7 @@ vuint8mf2_t test_vor_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vor_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m1_tumu( @@ -2055,7 +2055,7 @@ vuint8m1_t test_vor_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m2_tumu( @@ -2064,7 +2064,7 @@ vuint8m1_t test_vor_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vor_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m2_tumu( @@ -2073,7 +2073,7 @@ vuint8m2_t test_vor_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vor_vv_u8m4_tumu( @@ -2082,7 +2082,7 @@ vuint8m2_t test_vor_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vor_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m4_tumu( @@ -2091,7 +2091,7 @@ vuint8m4_t test_vor_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m8_tumu( @@ -2100,7 +2100,7 @@ vuint8m4_t test_vor_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vor_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m8_tumu( @@ -2109,7 +2109,7 @@ vuint8m8_t test_vor_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf4_tumu( @@ -2118,7 +2118,7 @@ vuint8m8_t test_vor_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t 
vl) { - return vor_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf4_tumu( @@ -2127,7 +2127,7 @@ vuint16mf4_t test_vor_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf2_tumu( @@ -2136,7 +2136,7 @@ vuint16mf4_t test_vor_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vor_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf2_tumu( @@ -2145,7 +2145,7 @@ vuint16mf2_t test_vor_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m1_tumu( @@ -2154,7 +2154,7 @@ vuint16mf2_t test_vor_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vor_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m1_tumu( @@ -2163,7 +2163,7 @@ vuint16m1_t 
test_vor_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m2_tumu( @@ -2172,7 +2172,7 @@ vuint16m1_t test_vor_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vor_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m2_tumu( @@ -2181,7 +2181,7 @@ vuint16m2_t test_vor_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m4_tumu( @@ -2190,7 +2190,7 @@ vuint16m2_t test_vor_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vor_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m4_tumu( @@ -2199,7 +2199,7 @@ vuint16m4_t test_vor_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m4_tumu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vor_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m8_tumu( @@ -2208,7 +2208,7 @@ vuint16m4_t test_vor_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vor_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m8_tumu( @@ -2217,7 +2217,7 @@ vuint16m8_t test_vor_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_tumu( @@ -2226,7 +2226,7 @@ vuint16m8_t test_vor_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vor_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_tumu( @@ -2235,7 +2235,7 @@ vuint32mf2_t test_vor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m1_tumu( @@ -2244,7 +2244,7 @@ vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m1_t test_vor_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vor_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m1_tumu( @@ -2253,7 +2253,7 @@ vuint32m1_t test_vor_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m2_tumu( @@ -2262,7 +2262,7 @@ vuint32m1_t test_vor_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vor_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m2_tumu( @@ -2271,7 +2271,7 @@ vuint32m2_t test_vor_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m4_tumu( @@ -2280,7 +2280,7 @@ vuint32m2_t test_vor_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vor_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vor_vx_u32m4_tumu( @@ -2289,7 +2289,7 @@ vuint32m4_t test_vor_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m8_tumu( @@ -2298,7 +2298,7 @@ vuint32m4_t test_vor_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vor_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m8_tumu( @@ -2307,7 +2307,7 @@ vuint32m8_t test_vor_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m1_tumu( @@ -2316,7 +2316,7 @@ vuint32m8_t test_vor_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vor_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m1_tumu( @@ -2325,7 +2325,7 @@ vuint64m1_t test_vor_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, 
uint64_t op2, size_t vl) { - return vor_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m2_tumu( @@ -2334,7 +2334,7 @@ vuint64m1_t test_vor_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vor_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m2_tumu( @@ -2343,7 +2343,7 @@ vuint64m2_t test_vor_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m4_tumu( @@ -2352,7 +2352,7 @@ vuint64m2_t test_vor_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vor_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m4_tumu( @@ -2361,7 +2361,7 @@ vuint64m4_t test_vor_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m8_tumu( @@ -2370,7 +2370,7 @@ vuint64m4_t test_vor_vx_u64m4_tumu(vbool16_t 
mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vor_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m8_tumu( @@ -2379,7 +2379,7 @@ vuint64m8_t test_vor_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf8_mu( @@ -2388,7 +2388,7 @@ vuint64m8_t test_vor_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vor_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf8_mu( @@ -2397,7 +2397,7 @@ vint8mf8_t test_vor_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf4_mu( @@ -2406,7 +2406,7 @@ vint8mf8_t test_vor_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vor_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8mf4_mu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf4_mu( @@ -2415,7 +2415,7 @@ vint8mf4_t test_vor_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf2_mu( @@ -2424,7 +2424,7 @@ vint8mf4_t test_vor_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vor_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf2_mu( @@ -2433,7 +2433,7 @@ vint8mf2_t test_vor_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m1_mu( @@ -2442,7 +2442,7 @@ vint8mf2_t test_vor_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vor_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m1_mu( @@ -2451,7 +2451,7 @@ vint8m1_t test_vor_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return 
vor_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m2_mu( @@ -2460,7 +2460,7 @@ vint8m1_t test_vor_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vor_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m2_mu( @@ -2469,7 +2469,7 @@ vint8m2_t test_vor_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m4_mu( @@ -2478,7 +2478,7 @@ vint8m2_t test_vor_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vor_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m4_mu( @@ -2487,7 +2487,7 @@ vint8m4_t test_vor_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m8_mu( @@ -2496,7 +2496,7 @@ vint8m4_t test_vor_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vv_i8m8_mu(vbool1_t mask, vint8m8_t 
maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vor_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m8_mu( @@ -2505,7 +2505,7 @@ vint8m8_t test_vor_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vor_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf4_mu( @@ -2514,7 +2514,7 @@ vint8m8_t test_vor_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vor_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf4_mu( @@ -2523,7 +2523,7 @@ vint16mf4_t test_vor_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16mf2_mu( @@ -2532,7 +2532,7 @@ vint16mf4_t test_vor_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vor_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16mf2_mu( @@ -2541,7 +2541,7 @@ vint16mf2_t test_vor_vv_i16mf2_mu(vbool32_t mask, 
vint16mf2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m1_mu( @@ -2550,7 +2550,7 @@ vint16mf2_t test_vor_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vor_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m1_mu( @@ -2559,7 +2559,7 @@ vint16m1_t test_vor_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m2_mu( @@ -2568,7 +2568,7 @@ vint16m1_t test_vor_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vor_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m2_mu( @@ -2577,7 +2577,7 @@ vint16m2_t test_vor_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vor_vv_i16m4_mu( @@ -2586,7 +2586,7 @@ vint16m2_t test_vor_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vor_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m4_mu( @@ -2595,7 +2595,7 @@ vint16m4_t test_vor_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i16m8_mu( @@ -2604,7 +2604,7 @@ vint16m4_t test_vor_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vor_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i16m8_mu( @@ -2613,7 +2613,7 @@ vint16m8_t test_vor_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vor_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_mu( @@ -2622,7 +2622,7 @@ vint16m8_t test_vor_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return 
vor_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_mu( @@ -2631,7 +2631,7 @@ vint32mf2_t test_vor_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vor_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m1_mu( @@ -2640,7 +2640,7 @@ vint32mf2_t test_vor_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vor_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m1_mu( @@ -2649,7 +2649,7 @@ vint32m1_t test_vor_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m2_mu( @@ -2658,7 +2658,7 @@ vint32m1_t test_vor_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vor_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m2_mu( @@ -2667,7 +2667,7 @@ vint32m2_t test_vor_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vor_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m4_mu( @@ -2676,7 +2676,7 @@ vint32m2_t test_vor_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vor_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m4_mu( @@ -2685,7 +2685,7 @@ vint32m4_t test_vor_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i32m8_mu( @@ -2694,7 +2694,7 @@ vint32m4_t test_vor_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vor_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i32m8_mu( @@ -2703,7 +2703,7 @@ vint32m8_t test_vor_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vor_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m1_mu( @@ -2712,7 +2712,7 @@ vint32m8_t 
test_vor_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vor_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m1_mu( @@ -2721,7 +2721,7 @@ vint64m1_t test_vor_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m2_mu( @@ -2730,7 +2730,7 @@ vint64m1_t test_vor_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vor_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m2_mu( @@ -2739,7 +2739,7 @@ vint64m2_t test_vor_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m4_mu( @@ -2748,7 +2748,7 @@ vint64m2_t test_vor_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vor_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m4_mu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m4_mu( @@ -2757,7 +2757,7 @@ vint64m4_t test_vor_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i64m8_mu( @@ -2766,7 +2766,7 @@ vint64m4_t test_vor_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vor_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i64m8_mu( @@ -2775,7 +2775,7 @@ vint64m8_t test_vor_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vor_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf8_mu( @@ -2784,7 +2784,7 @@ vint64m8_t test_vor_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vor_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf8_mu( @@ -2793,7 +2793,7 @@ vuint8mf8_t test_vor_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, 
size_t vl) { - return vor_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf4_mu( @@ -2802,7 +2802,7 @@ vuint8mf8_t test_vor_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vor_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf4_mu( @@ -2811,7 +2811,7 @@ vuint8mf4_t test_vor_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8mf2_mu( @@ -2820,7 +2820,7 @@ vuint8mf4_t test_vor_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vor_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8mf2_mu( @@ -2829,7 +2829,7 @@ vuint8mf2_t test_vor_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m1_mu( @@ -2838,7 +2838,7 @@ vuint8mf2_t test_vor_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint8m1_t test_vor_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vor_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m1_mu( @@ -2847,7 +2847,7 @@ vuint8m1_t test_vor_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m2_mu( @@ -2856,7 +2856,7 @@ vuint8m1_t test_vor_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vor_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m2_mu( @@ -2865,7 +2865,7 @@ vuint8m2_t test_vor_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m4_mu( @@ -2874,7 +2874,7 @@ vuint8m2_t test_vor_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vor_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m4_mu( @@ -2883,7 +2883,7 @@ vuint8m4_t 
test_vor_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u8m8_mu( @@ -2892,7 +2892,7 @@ vuint8m4_t test_vor_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vor_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u8m8_mu( @@ -2901,7 +2901,7 @@ vuint8m8_t test_vor_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vor_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf4_mu( @@ -2910,7 +2910,7 @@ vuint8m8_t test_vor_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vor_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf4_mu( @@ -2919,7 +2919,7 @@ vuint16mf4_t test_vor_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vor_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16mf2_mu( @@ -2928,7 +2928,7 @@ vuint16mf4_t test_vor_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vor_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16mf2_mu( @@ -2937,7 +2937,7 @@ vuint16mf2_t test_vor_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m1_mu( @@ -2946,7 +2946,7 @@ vuint16mf2_t test_vor_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vor_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m1_mu( @@ -2955,7 +2955,7 @@ vuint16m1_t test_vor_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m2_mu( @@ -2964,7 +2964,7 @@ vuint16m1_t test_vor_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2_mu(vbool8_t 
mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vor_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m2_mu( @@ -2973,7 +2973,7 @@ vuint16m2_t test_vor_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m4_mu( @@ -2982,7 +2982,7 @@ vuint16m2_t test_vor_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vor_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m4_mu( @@ -2991,7 +2991,7 @@ vuint16m4_t test_vor_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u16m8_mu( @@ -3000,7 +3000,7 @@ vuint16m4_t test_vor_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vor_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u16m8_mu( @@ -3009,7 +3009,7 @@ vuint16m8_t 
test_vor_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vor_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_mu( @@ -3018,7 +3018,7 @@ vuint16m8_t test_vor_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vor_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_mu( @@ -3027,7 +3027,7 @@ vuint32mf2_t test_vor_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m1_mu( @@ -3036,7 +3036,7 @@ vuint32mf2_t test_vor_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vor_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m1_mu( @@ -3045,7 +3045,7 @@ vuint32m1_t test_vor_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vor_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m2_mu( @@ -3054,7 +3054,7 @@ vuint32m1_t test_vor_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vor_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m2_mu( @@ -3063,7 +3063,7 @@ vuint32m2_t test_vor_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m4_mu( @@ -3072,7 +3072,7 @@ vuint32m2_t test_vor_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vor_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m4_mu( @@ -3081,7 +3081,7 @@ vuint32m4_t test_vor_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u32m8_mu( @@ -3090,7 +3090,7 @@ vuint32m4_t test_vor_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8_mu(vbool4_t mask, vuint32m8_t 
maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vor_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u32m8_mu( @@ -3099,7 +3099,7 @@ vuint32m8_t test_vor_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vor_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m1_mu( @@ -3108,7 +3108,7 @@ vuint32m8_t test_vor_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vor_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m1_mu( @@ -3117,7 +3117,7 @@ vuint64m1_t test_vor_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m2_mu( @@ -3126,7 +3126,7 @@ vuint64m1_t test_vor_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vor_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m2_mu( @@ -3135,7 +3135,7 @@ vuint64m2_t test_vor_vv_u64m2_mu(vbool32_t mask, 
vuint64m2_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m4_mu( @@ -3144,7 +3144,7 @@ vuint64m2_t test_vor_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vor_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m4_mu( @@ -3153,7 +3153,7 @@ vuint64m4_t test_vor_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_u64m8_mu( @@ -3162,7 +3162,7 @@ vuint64m4_t test_vor_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vor_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_u64m8_mu( @@ -3171,6 +3171,6 @@ vuint64m8_t test_vor_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vor_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vor_vx_u64m8_mu(mask, maskedoff, op1, op2, 
vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredand.c index 85ddc2d592e7..79e9dcafecf2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredand.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_tu( @@ -21,7 +21,7 @@ vint8m1_t test_vredand_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_tu( @@ -30,7 +30,7 @@ vint8m1_t test_vredand_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_tu( @@ -39,7 +39,7 @@ vint8m1_t test_vredand_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); + return 
__riscv_vredand_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_tu( @@ -48,7 +48,7 @@ vint8m1_t test_vredand_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_tu( @@ -57,7 +57,7 @@ vint8m1_t test_vredand_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_tu( @@ -66,7 +66,7 @@ vint8m1_t test_vredand_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vredand_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_tu( @@ -84,7 +84,7 @@ vint16m1_t test_vredand_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vec // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_tu( @@ -93,7 +93,7 @@ vint16m1_t test_vredand_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_tu( @@ -102,7 +102,7 @@ vint16m1_t test_vredand_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_tu( @@ -111,7 +111,7 @@ vint16m1_t test_vredand_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_tu( @@ -120,7 +120,7 @@ vint16m1_t test_vredand_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m8_i16m1_tu(maskedoff, 
vector, scalar, vl); + return __riscv_vredand_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tu( @@ -129,7 +129,7 @@ vint16m1_t test_vredand_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_tu( @@ -138,7 +138,7 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_tu( @@ -147,7 +147,7 @@ vint32m1_t test_vredand_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_tu( @@ -156,7 +156,7 @@ vint32m1_t test_vredand_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_tu( @@ -165,7 +165,7 @@ vint32m1_t 
test_vredand_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_tu( @@ -174,7 +174,7 @@ vint32m1_t test_vredand_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_tu( @@ -183,7 +183,7 @@ vint64m1_t test_vredand_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_tu( @@ -192,7 +192,7 @@ vint64m1_t test_vredand_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_tu( @@ -201,7 +201,7 @@ vint64m1_t test_vredand_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t 
scalar, size_t vl) { - return vredand_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_tu( @@ -210,7 +210,7 @@ vint64m1_t test_vredand_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_tu( @@ -219,7 +219,7 @@ vuint8m1_t test_vredand_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_tu( @@ -228,7 +228,7 @@ vuint8m1_t test_vredand_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_tu( @@ -237,7 +237,7 @@ vuint8m1_t test_vredand_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_tu( 
@@ -246,7 +246,7 @@ vuint8m1_t test_vredand_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_tu( @@ -255,7 +255,7 @@ vuint8m1_t test_vredand_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_tu( @@ -264,7 +264,7 @@ vuint8m1_t test_vredand_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_tu( @@ -273,7 +273,7 @@ vuint8m1_t test_vredand_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_tu( @@ -282,7 +282,7 @@ vuint16m1_t test_vredand_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, 
vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_tu( @@ -291,7 +291,7 @@ vuint16m1_t test_vredand_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_tu( @@ -300,7 +300,7 @@ vuint16m1_t test_vredand_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_tu( @@ -309,7 +309,7 @@ vuint16m1_t test_vredand_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_tu( @@ -318,7 +318,7 @@ vuint16m1_t test_vredand_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u16m8_u16m1_tu(maskedoff, vector, 
scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tu( @@ -327,7 +327,7 @@ vuint16m1_t test_vredand_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_tu( @@ -336,7 +336,7 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_tu( @@ -345,7 +345,7 @@ vuint32m1_t test_vredand_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_tu( @@ -354,7 +354,7 @@ vuint32m1_t test_vredand_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_tu( @@ -363,7 +363,7 @@ vuint32m1_t test_vredand_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t ve // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_tu( @@ -372,7 +372,7 @@ vuint32m1_t test_vredand_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_tu( @@ -381,7 +381,7 @@ vuint64m1_t test_vredand_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_tu( @@ -390,7 +390,7 @@ vuint64m1_t test_vredand_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_tu( @@ -399,7 +399,7 @@ vuint64m1_t test_vredand_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return 
vredand_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_tum( @@ -408,7 +408,7 @@ vuint64m1_t test_vredand_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_tum( @@ -417,7 +417,7 @@ vint8m1_t test_vredand_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_tum( @@ -426,7 +426,7 @@ vint8m1_t test_vredand_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_tum( @@ -435,7 +435,7 @@ vint8m1_t test_vredand_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i8m1_i8m1_tum(mask, 
maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_tum( @@ -444,7 +444,7 @@ vint8m1_t test_vredand_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_tum( @@ -453,7 +453,7 @@ vint8m1_t test_vredand_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_tum( @@ -462,7 +462,7 @@ vint8m1_t test_vredand_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredand_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vredand_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_tum( @@ -480,7 
+480,7 @@ vint16m1_t test_vredand_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_tum( @@ -489,7 +489,7 @@ vint16m1_t test_vredand_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_tum( @@ -498,7 +498,7 @@ vint16m1_t test_vredand_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_tum( @@ -507,7 +507,7 @@ vint16m1_t test_vredand_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_tum( @@ -516,7 +516,7 @@ vint16m1_t test_vredand_vs_i16m4_i16m1_tum(vbool4_t mask, 
vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredand_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tum( @@ -525,7 +525,7 @@ vint16m1_t test_vredand_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_tum( @@ -534,7 +534,7 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_tum( @@ -543,7 +543,7 @@ vint32m1_t test_vredand_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_tum( @@ -552,7 +552,7 @@ vint32m1_t test_vredand_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vredand_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_tum( @@ -561,7 +561,7 @@ vint32m1_t test_vredand_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredand_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_tum( @@ -570,7 +570,7 @@ vint32m1_t test_vredand_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_tum( @@ -579,7 +579,7 @@ vint64m1_t test_vredand_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_tum( @@ -588,7 +588,7 @@ vint64m1_t test_vredand_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, 
vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_tum( @@ -597,7 +597,7 @@ vint64m1_t test_vredand_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredand_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_tum( @@ -606,7 +606,7 @@ vint64m1_t test_vredand_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_tum( @@ -615,7 +615,7 @@ vuint8m1_t test_vredand_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_tum( @@ -624,7 +624,7 @@ vuint8m1_t test_vredand_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return 
vredand_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_tum( @@ -633,7 +633,7 @@ vuint8m1_t test_vredand_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_tum( @@ -642,7 +642,7 @@ vuint8m1_t test_vredand_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_tum( @@ -651,7 +651,7 @@ vuint8m1_t test_vredand_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_tum( @@ -660,7 +660,7 @@ vuint8m1_t test_vredand_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredand_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return 
__riscv_vredand_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_tum( @@ -669,7 +669,7 @@ vuint8m1_t test_vredand_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_tum( @@ -678,7 +678,7 @@ vuint16m1_t test_vredand_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_tum( @@ -687,7 +687,7 @@ vuint16m1_t test_vredand_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_tum( @@ -696,7 +696,7 @@ vuint16m1_t test_vredand_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u16m2_u16m1_tum(mask, 
maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_tum( @@ -705,7 +705,7 @@ vuint16m1_t test_vredand_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_tum( @@ -714,7 +714,7 @@ vuint16m1_t test_vredand_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredand_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tum( @@ -723,7 +723,7 @@ vuint16m1_t test_vredand_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_tum( @@ -732,7 +732,7 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_tum( @@ -741,7 +741,7 @@ vuint32m1_t test_vredand_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_tum( @@ -750,7 +750,7 @@ vuint32m1_t test_vredand_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_tum( @@ -759,7 +759,7 @@ vuint32m1_t test_vredand_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredand_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_tum( @@ -768,7 +768,7 @@ vuint32m1_t test_vredand_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_tum( @@ 
-777,7 +777,7 @@ vuint64m1_t test_vredand_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_tum( @@ -786,7 +786,7 @@ vuint64m1_t test_vredand_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_tum( @@ -795,6 +795,6 @@ vuint64m1_t test_vredand_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredand_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredand_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredmax.c index eda79cacb687..72e2c7bb83f1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredmax.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return 
vredmax_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_tu( @@ -21,7 +21,7 @@ vint8m1_t test_vredmax_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_tu( @@ -30,7 +30,7 @@ vint8m1_t test_vredmax_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_tu( @@ -39,7 +39,7 @@ vint8m1_t test_vredmax_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_tu( @@ -48,7 +48,7 @@ vint8m1_t test_vredmax_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_tu( @@ -57,7 +57,7 @@ vint8m1_t 
test_vredmax_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_tu( @@ -66,7 +66,7 @@ vint8m1_t test_vredmax_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vredmax_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_tu( @@ -84,7 +84,7 @@ vint16m1_t test_vredmax_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_tu( @@ -93,7 +93,7 @@ vint16m1_t test_vredmax_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) 
{ - return vredmax_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_tu( @@ -102,7 +102,7 @@ vint16m1_t test_vredmax_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_tu( @@ -111,7 +111,7 @@ vint16m1_t test_vredmax_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_tu( @@ -120,7 +120,7 @@ vint16m1_t test_vredmax_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tu( @@ -129,7 +129,7 @@ vint16m1_t test_vredmax_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredmax_vs_i32m1_i32m1_tu( @@ -138,7 +138,7 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_tu( @@ -147,7 +147,7 @@ vint32m1_t test_vredmax_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_tu( @@ -156,7 +156,7 @@ vint32m1_t test_vredmax_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_tu( @@ -165,7 +165,7 @@ vint32m1_t test_vredmax_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_tu( @@ -174,7 +174,7 @@ vint32m1_t test_vredmax_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vredmax_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_tu( @@ -183,7 +183,7 @@ vint64m1_t test_vredmax_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_tu( @@ -192,7 +192,7 @@ vint64m1_t test_vredmax_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_tu( @@ -201,7 +201,7 @@ vint64m1_t test_vredmax_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_tum( @@ -210,7 +210,7 @@ vint64m1_t test_vredmax_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return 
__riscv_vredmax_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_tum( @@ -219,7 +219,7 @@ vint8m1_t test_vredmax_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_tum( @@ -228,7 +228,7 @@ vint8m1_t test_vredmax_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_tum( @@ -237,7 +237,7 @@ vint8m1_t test_vredmax_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_tum( @@ -246,7 +246,7 @@ vint8m1_t test_vredmax_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredmax_vs_i8m4_i8m1_tum( @@ -255,7 +255,7 @@ vint8m1_t test_vredmax_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_tum( @@ -264,7 +264,7 @@ vint8m1_t test_vredmax_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmax_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_tum( @@ -273,7 +273,7 @@ vint8m1_t test_vredmax_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_tum( @@ -282,7 +282,7 @@ vint16m1_t test_vredmax_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_tum( @@ -291,7 +291,7 @@ vint16m1_t 
test_vredmax_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_tum( @@ -300,7 +300,7 @@ vint16m1_t test_vredmax_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_tum( @@ -309,7 +309,7 @@ vint16m1_t test_vredmax_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_tum( @@ -318,7 +318,7 @@ vint16m1_t test_vredmax_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmax_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tum( @@ -327,7 +327,7 @@ vint16m1_t test_vredmax_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_tum( @@ -336,7 +336,7 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_tum( @@ -345,7 +345,7 @@ vint32m1_t test_vredmax_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_tum( @@ -354,7 +354,7 @@ vint32m1_t test_vredmax_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_tum( @@ -363,7 +363,7 @@ vint32m1_t test_vredmax_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vredmax_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmax_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_tum( @@ -372,7 +372,7 @@ vint32m1_t test_vredmax_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_tum( @@ -381,7 +381,7 @@ vint64m1_t test_vredmax_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_tum( @@ -390,7 +390,7 @@ vint64m1_t test_vredmax_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_tum( @@ -399,6 +399,6 @@ vint64m1_t test_vredmax_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, 
vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmax_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmax_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredmaxu.c index fe03c7e0ee7c..9982d04b4e72 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredmaxu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredmaxu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_tu( @@ -21,7 +21,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_tu( @@ -30,7 +30,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_tu( @@ -39,7 +39,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vect // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_tu( @@ -48,7 +48,7 @@ vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_tu( @@ -57,7 +57,7 @@ vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_tu( @@ -66,7 +66,7 @@ vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, 
vl); + return __riscv_vredmaxu_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_tu( @@ -84,7 +84,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_tu( @@ -93,7 +93,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_tu( @@ -102,7 +102,7 @@ vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_tu( @@ -111,7 +111,7 @@ vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_tu( @@ -120,7 +120,7 @@ 
vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tu( @@ -129,7 +129,7 @@ vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_tu( @@ -138,7 +138,7 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_tu( @@ -147,7 +147,7 @@ vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_tu( @@ -156,7 +156,7 @@ vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vredmaxu_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_tu( @@ -165,7 +165,7 @@ vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_tu( @@ -174,7 +174,7 @@ vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_tu( @@ -183,7 +183,7 @@ vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_tu( @@ -192,7 +192,7 @@ vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m4_u64m1_tu(maskedoff, vector, 
scalar, vl); + return __riscv_vredmaxu_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_tu( @@ -201,7 +201,7 @@ vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_tum( @@ -210,7 +210,7 @@ vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_tum( @@ -219,7 +219,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_tum( @@ -228,7 +228,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); } 
// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_tum( @@ -237,7 +237,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_tum( @@ -246,7 +246,7 @@ vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_tum( @@ -255,7 +255,7 @@ vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_tum( @@ -264,7 +264,7 @@ vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredmaxu_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_tum( @@ -273,7 +273,7 @@ vuint8m1_t 
test_vredmaxu_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_tum( @@ -282,7 +282,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_tum( @@ -291,7 +291,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_tum( @@ -300,7 +300,7 @@ vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_tum( @@ -309,7 +309,7 @@ vuint16m1_t 
test_vredmaxu_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_tum( @@ -318,7 +318,7 @@ vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredmaxu_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tum( @@ -327,7 +327,7 @@ vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_tum( @@ -336,7 +336,7 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_tum( @@ -345,7 +345,7 @@ vuint32m1_t 
test_vredmaxu_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_tum( @@ -354,7 +354,7 @@ vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_tum( @@ -363,7 +363,7 @@ vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredmaxu_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_tum( @@ -372,7 +372,7 @@ vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_tum( @@ -381,7 +381,7 @@ vuint64m1_t 
test_vredmaxu_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_tum( @@ -390,7 +390,7 @@ vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_tum( @@ -399,6 +399,6 @@ vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredmaxu_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmaxu_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredmin.c index 0bc59a9b5c5b..875d422f5b20 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredmin.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return 
vredmin_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_tu( @@ -21,7 +21,7 @@ vint8m1_t test_vredmin_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_tu( @@ -30,7 +30,7 @@ vint8m1_t test_vredmin_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_tu( @@ -39,7 +39,7 @@ vint8m1_t test_vredmin_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_tu( @@ -48,7 +48,7 @@ vint8m1_t test_vredmin_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_tu( @@ -57,7 +57,7 @@ vint8m1_t 
test_vredmin_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_tu( @@ -66,7 +66,7 @@ vint8m1_t test_vredmin_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vredmin_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_tu( @@ -84,7 +84,7 @@ vint16m1_t test_vredmin_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_tu( @@ -93,7 +93,7 @@ vint16m1_t test_vredmin_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) 
{ - return vredmin_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_tu( @@ -102,7 +102,7 @@ vint16m1_t test_vredmin_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_tu( @@ -111,7 +111,7 @@ vint16m1_t test_vredmin_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_tu( @@ -120,7 +120,7 @@ vint16m1_t test_vredmin_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tu( @@ -129,7 +129,7 @@ vint16m1_t test_vredmin_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredmin_vs_i32m1_i32m1_tu( @@ -138,7 +138,7 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_tu( @@ -147,7 +147,7 @@ vint32m1_t test_vredmin_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_tu( @@ -156,7 +156,7 @@ vint32m1_t test_vredmin_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_tu( @@ -165,7 +165,7 @@ vint32m1_t test_vredmin_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_tu( @@ -174,7 +174,7 @@ vint32m1_t test_vredmin_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vredmin_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_tu( @@ -183,7 +183,7 @@ vint64m1_t test_vredmin_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_tu( @@ -192,7 +192,7 @@ vint64m1_t test_vredmin_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_tu( @@ -201,7 +201,7 @@ vint64m1_t test_vredmin_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_tum( @@ -210,7 +210,7 @@ vint64m1_t test_vredmin_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return 
__riscv_vredmin_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_tum( @@ -219,7 +219,7 @@ vint8m1_t test_vredmin_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_tum( @@ -228,7 +228,7 @@ vint8m1_t test_vredmin_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_tum( @@ -237,7 +237,7 @@ vint8m1_t test_vredmin_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_tum( @@ -246,7 +246,7 @@ vint8m1_t test_vredmin_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredmin_vs_i8m4_i8m1_tum( @@ -255,7 +255,7 @@ vint8m1_t test_vredmin_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_tum( @@ -264,7 +264,7 @@ vint8m1_t test_vredmin_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredmin_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_tum( @@ -273,7 +273,7 @@ vint8m1_t test_vredmin_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_tum( @@ -282,7 +282,7 @@ vint16m1_t test_vredmin_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_tum( @@ -291,7 +291,7 @@ vint16m1_t 
test_vredmin_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_tum( @@ -300,7 +300,7 @@ vint16m1_t test_vredmin_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_tum( @@ -309,7 +309,7 @@ vint16m1_t test_vredmin_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_tum( @@ -318,7 +318,7 @@ vint16m1_t test_vredmin_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredmin_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tum( @@ -327,7 +327,7 @@ vint16m1_t test_vredmin_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_tum( @@ -336,7 +336,7 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_tum( @@ -345,7 +345,7 @@ vint32m1_t test_vredmin_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_tum( @@ -354,7 +354,7 @@ vint32m1_t test_vredmin_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmin_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_tum( @@ -363,7 +363,7 @@ vint32m1_t test_vredmin_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vredmin_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredmin_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_tum( @@ -372,7 +372,7 @@ vint32m1_t test_vredmin_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_tum( @@ -381,7 +381,7 @@ vint64m1_t test_vredmin_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_tum( @@ -390,7 +390,7 @@ vint64m1_t test_vredmin_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_tum( @@ -399,6 +399,6 @@ vint64m1_t test_vredmin_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmin_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, 
vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredmin_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredmin_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredminu.c index 1b7f426159c7..d4b4e348a858 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredminu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredminu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_tu( @@ -21,7 +21,7 @@ vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_tu( @@ -30,7 +30,7 @@ vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_tu( @@ -39,7 +39,7 @@ vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vect // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_tu( @@ -48,7 +48,7 @@ vuint8m1_t test_vredminu_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_tu( @@ -57,7 +57,7 @@ vuint8m1_t test_vredminu_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_tu( @@ -66,7 +66,7 @@ vuint8m1_t test_vredminu_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vredminu_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, 
vl); + return __riscv_vredminu_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_tu( @@ -84,7 +84,7 @@ vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_tu( @@ -93,7 +93,7 @@ vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_tu( @@ -102,7 +102,7 @@ vuint16m1_t test_vredminu_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_tu( @@ -111,7 +111,7 @@ vuint16m1_t test_vredminu_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_tu( @@ -120,7 +120,7 @@ 
vuint16m1_t test_vredminu_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tu( @@ -129,7 +129,7 @@ vuint16m1_t test_vredminu_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_tu( @@ -138,7 +138,7 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_tu( @@ -147,7 +147,7 @@ vuint32m1_t test_vredminu_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_tu( @@ -156,7 +156,7 @@ vuint32m1_t test_vredminu_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vredminu_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_tu( @@ -165,7 +165,7 @@ vuint32m1_t test_vredminu_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_tu( @@ -174,7 +174,7 @@ vuint32m1_t test_vredminu_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_tu( @@ -183,7 +183,7 @@ vuint64m1_t test_vredminu_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_tu( @@ -192,7 +192,7 @@ vuint64m1_t test_vredminu_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m4_u64m1_tu(maskedoff, vector, 
scalar, vl); + return __riscv_vredminu_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_tu( @@ -201,7 +201,7 @@ vuint64m1_t test_vredminu_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_tum( @@ -210,7 +210,7 @@ vuint64m1_t test_vredminu_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_tum( @@ -219,7 +219,7 @@ vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_tum( @@ -228,7 +228,7 @@ vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); } 
// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_tum( @@ -237,7 +237,7 @@ vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_tum( @@ -246,7 +246,7 @@ vuint8m1_t test_vredminu_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_tum( @@ -255,7 +255,7 @@ vuint8m1_t test_vredminu_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_tum( @@ -264,7 +264,7 @@ vuint8m1_t test_vredminu_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredminu_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredminu_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_tum( @@ -273,7 +273,7 @@ vuint8m1_t 
test_vredminu_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_tum( @@ -282,7 +282,7 @@ vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_tum( @@ -291,7 +291,7 @@ vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_tum( @@ -300,7 +300,7 @@ vuint16m1_t test_vredminu_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_tum( @@ -309,7 +309,7 @@ vuint16m1_t 
test_vredminu_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_tum( @@ -318,7 +318,7 @@ vuint16m1_t test_vredminu_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredminu_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredminu_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tum( @@ -327,7 +327,7 @@ vuint16m1_t test_vredminu_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_tum( @@ -336,7 +336,7 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_tum( @@ -345,7 +345,7 @@ vuint32m1_t 
test_vredminu_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_tum( @@ -354,7 +354,7 @@ vuint32m1_t test_vredminu_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_tum( @@ -363,7 +363,7 @@ vuint32m1_t test_vredminu_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredminu_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_tum( @@ -372,7 +372,7 @@ vuint32m1_t test_vredminu_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_tum( @@ -381,7 +381,7 @@ vuint64m1_t 
test_vredminu_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_tum( @@ -390,7 +390,7 @@ vuint64m1_t test_vredminu_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_tum( @@ -399,6 +399,6 @@ vuint64m1_t test_vredminu_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredminu_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredminu_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredor.c index 9358f4dbc669..fa88d7e1558b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredor.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf8_i8m1_tu(maskedoff, 
vector, scalar, vl); + return __riscv_vredor_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_tu( @@ -21,7 +21,7 @@ vint8m1_t test_vredor_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_tu( @@ -30,7 +30,7 @@ vint8m1_t test_vredor_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_tu( @@ -39,7 +39,7 @@ vint8m1_t test_vredor_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_tu( @@ -48,7 +48,7 @@ vint8m1_t test_vredor_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_tu( @@ -57,7 +57,7 @@ vint8m1_t test_vredor_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_tu( @@ -66,7 +66,7 @@ vint8m1_t test_vredor_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vredor_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_tu( @@ -84,7 +84,7 @@ vint16m1_t test_vredor_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_tu( @@ -93,7 +93,7 @@ vint16m1_t test_vredor_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); + return 
__riscv_vredor_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_tu( @@ -102,7 +102,7 @@ vint16m1_t test_vredor_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_tu( @@ -111,7 +111,7 @@ vint16m1_t test_vredor_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_tu( @@ -120,7 +120,7 @@ vint16m1_t test_vredor_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tu( @@ -129,7 +129,7 @@ vint16m1_t test_vredor_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_tu( @@ -138,7 +138,7 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, 
vint32mf2_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_tu( @@ -147,7 +147,7 @@ vint32m1_t test_vredor_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_tu( @@ -156,7 +156,7 @@ vint32m1_t test_vredor_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_tu( @@ -165,7 +165,7 @@ vint32m1_t test_vredor_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_tu( @@ -174,7 +174,7 @@ vint32m1_t test_vredor_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m1_i64m1_tu(maskedoff, 
vector, scalar, vl); + return __riscv_vredor_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_tu( @@ -183,7 +183,7 @@ vint64m1_t test_vredor_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_tu( @@ -192,7 +192,7 @@ vint64m1_t test_vredor_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_tu( @@ -201,7 +201,7 @@ vint64m1_t test_vredor_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_tu( @@ -210,7 +210,7 @@ vint64m1_t test_vredor_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_tu( @@ -219,7 +219,7 @@ vuint8m1_t 
test_vredor_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_tu( @@ -228,7 +228,7 @@ vuint8m1_t test_vredor_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_tu( @@ -237,7 +237,7 @@ vuint8m1_t test_vredor_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_tu( @@ -246,7 +246,7 @@ vuint8m1_t test_vredor_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_tu( @@ -255,7 +255,7 @@ vuint8m1_t test_vredor_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return 
vredor_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_tu( @@ -264,7 +264,7 @@ vuint8m1_t test_vredor_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_tu( @@ -273,7 +273,7 @@ vuint8m1_t test_vredor_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_tu( @@ -282,7 +282,7 @@ vuint16m1_t test_vredor_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_tu( @@ -291,7 +291,7 @@ vuint16m1_t test_vredor_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_tu( @@ -300,7 +300,7 @@ 
vuint16m1_t test_vredor_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_tu( @@ -309,7 +309,7 @@ vuint16m1_t test_vredor_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_tu( @@ -318,7 +318,7 @@ vuint16m1_t test_vredor_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tu( @@ -327,7 +327,7 @@ vuint16m1_t test_vredor_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_tu( @@ -336,7 +336,7 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t 
vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_tu( @@ -345,7 +345,7 @@ vuint32m1_t test_vredor_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_tu( @@ -354,7 +354,7 @@ vuint32m1_t test_vredor_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_tu( @@ -363,7 +363,7 @@ vuint32m1_t test_vredor_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_tu( @@ -372,7 +372,7 @@ vuint32m1_t test_vredor_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_tu( @@ -381,7 +381,7 @@ vuint64m1_t test_vredor_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_tu( @@ -390,7 +390,7 @@ vuint64m1_t test_vredor_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_tu( @@ -399,7 +399,7 @@ vuint64m1_t test_vredor_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_tum( @@ -408,7 +408,7 @@ vuint64m1_t test_vredor_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_tum( @@ -417,7 +417,7 @@ vint8m1_t test_vredor_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vin // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m1_t test_vredor_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_tum( @@ -426,7 +426,7 @@ vint8m1_t test_vredor_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_tum( @@ -435,7 +435,7 @@ vint8m1_t test_vredor_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_tum( @@ -444,7 +444,7 @@ vint8m1_t test_vredor_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_tum( @@ -453,7 +453,7 @@ vint8m1_t test_vredor_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, 
size_t vl) { - return vredor_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_tum( @@ -462,7 +462,7 @@ vint8m1_t test_vredor_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredor_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vredor_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_tum( @@ -480,7 +480,7 @@ vint16m1_t test_vredor_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_tum( @@ -489,7 +489,7 @@ vint16m1_t test_vredor_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return 
__riscv_vredor_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_tum( @@ -498,7 +498,7 @@ vint16m1_t test_vredor_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_tum( @@ -507,7 +507,7 @@ vint16m1_t test_vredor_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_tum( @@ -516,7 +516,7 @@ vint16m1_t test_vredor_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredor_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tum( @@ -525,7 +525,7 @@ vint16m1_t test_vredor_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_tum( @@ -534,7 +534,7 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_tum( @@ -543,7 +543,7 @@ vint32m1_t test_vredor_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_tum( @@ -552,7 +552,7 @@ vint32m1_t test_vredor_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_tum( @@ -561,7 +561,7 @@ vint32m1_t test_vredor_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredor_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_tum( @@ -570,7 +570,7 @@ vint32m1_t 
test_vredor_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_tum( @@ -579,7 +579,7 @@ vint64m1_t test_vredor_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_tum( @@ -588,7 +588,7 @@ vint64m1_t test_vredor_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_tum( @@ -597,7 +597,7 @@ vint64m1_t test_vredor_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredor_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_tum( @@ -606,7 +606,7 @@ vint64m1_t test_vredor_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, v // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_tum( @@ -615,7 +615,7 @@ vuint8m1_t test_vredor_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_tum( @@ -624,7 +624,7 @@ vuint8m1_t test_vredor_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_tum( @@ -633,7 +633,7 @@ vuint8m1_t test_vredor_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_tum( @@ -642,7 +642,7 @@ vuint8m1_t test_vredor_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t 
vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_tum( @@ -651,7 +651,7 @@ vuint8m1_t test_vredor_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_tum( @@ -660,7 +660,7 @@ vuint8m1_t test_vredor_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredor_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_tum( @@ -669,7 +669,7 @@ vuint8m1_t test_vredor_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_tum( @@ -678,7 +678,7 @@ vuint16m1_t test_vredor_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16mf2_u16m1_tum(mask, maskedoff, 
vector, scalar, vl); + return __riscv_vredor_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_tum( @@ -687,7 +687,7 @@ vuint16m1_t test_vredor_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_tum( @@ -696,7 +696,7 @@ vuint16m1_t test_vredor_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_tum( @@ -705,7 +705,7 @@ vuint16m1_t test_vredor_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_tum( @@ -714,7 +714,7 @@ vuint16m1_t test_vredor_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredor_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u16m8_u16m1_tum(mask, 
maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tum( @@ -723,7 +723,7 @@ vuint16m1_t test_vredor_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_tum( @@ -732,7 +732,7 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_tum( @@ -741,7 +741,7 @@ vuint32m1_t test_vredor_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_tum( @@ -750,7 +750,7 @@ vuint32m1_t test_vredor_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredor_vs_u32m8_u32m1_tum( @@ -759,7 +759,7 @@ vuint32m1_t test_vredor_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredor_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_tum( @@ -768,7 +768,7 @@ vuint32m1_t test_vredor_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_tum( @@ -777,7 +777,7 @@ vuint64m1_t test_vredor_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_tum( @@ -786,7 +786,7 @@ vuint64m1_t test_vredor_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_tum( @@ -795,6 +795,6 @@ vuint64m1_t 
test_vredor_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredor_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredor_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredsum.c index d4e60e1b92a1..5b4c0b051866 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredsum.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_tu( @@ -21,7 +21,7 @@ vint8m1_t test_vredsum_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_tu( @@ -30,7 +30,7 @@ vint8m1_t test_vredsum_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_tu( @@ -39,7 +39,7 @@ vint8m1_t test_vredsum_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_tu( @@ -48,7 +48,7 @@ vint8m1_t test_vredsum_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_tu( @@ -57,7 +57,7 @@ vint8m1_t test_vredsum_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_tu( @@ -66,7 +66,7 @@ vint8m1_t test_vredsum_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vredsum_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, 
vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_tu( @@ -84,7 +84,7 @@ vint16m1_t test_vredsum_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_tu( @@ -93,7 +93,7 @@ vint16m1_t test_vredsum_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_tu( @@ -102,7 +102,7 @@ vint16m1_t test_vredsum_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_tu( @@ -111,7 +111,7 @@ vint16m1_t test_vredsum_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); } 
// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_tu( @@ -120,7 +120,7 @@ vint16m1_t test_vredsum_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tu( @@ -129,7 +129,7 @@ vint16m1_t test_vredsum_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_tu( @@ -138,7 +138,7 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_tu( @@ -147,7 +147,7 @@ vint32m1_t test_vredsum_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_tu( @@ -156,7 +156,7 @@ vint32m1_t test_vredsum_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m1_t test_vredsum_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_tu( @@ -165,7 +165,7 @@ vint32m1_t test_vredsum_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_tu( @@ -174,7 +174,7 @@ vint32m1_t test_vredsum_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_tu( @@ -183,7 +183,7 @@ vint64m1_t test_vredsum_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_tu( @@ -192,7 +192,7 @@ vint64m1_t test_vredsum_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); + return 
__riscv_vredsum_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_tu( @@ -201,7 +201,7 @@ vint64m1_t test_vredsum_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_tu( @@ -210,7 +210,7 @@ vint64m1_t test_vredsum_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_tu( @@ -219,7 +219,7 @@ vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_tu( @@ -228,7 +228,7 @@ vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_tu( @@ -237,7 +237,7 @@ vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, 
vuint8mf2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_tu( @@ -246,7 +246,7 @@ vuint8m1_t test_vredsum_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_tu( @@ -255,7 +255,7 @@ vuint8m1_t test_vredsum_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_tu( @@ -264,7 +264,7 @@ vuint8m1_t test_vredsum_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_tu( @@ -273,7 +273,7 @@ vuint8m1_t test_vredsum_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf4_u16m1_tu(maskedoff, 
vector, scalar, vl); + return __riscv_vredsum_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_tu( @@ -282,7 +282,7 @@ vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_tu( @@ -291,7 +291,7 @@ vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_tu( @@ -300,7 +300,7 @@ vuint16m1_t test_vredsum_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_tu( @@ -309,7 +309,7 @@ vuint16m1_t test_vredsum_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_tu( @@ -318,7 +318,7 @@ 
vuint16m1_t test_vredsum_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tu( @@ -327,7 +327,7 @@ vuint16m1_t test_vredsum_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_tu( @@ -336,7 +336,7 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_tu( @@ -345,7 +345,7 @@ vuint32m1_t test_vredsum_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_tu( @@ -354,7 +354,7 @@ vuint32m1_t test_vredsum_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m4_u32m1_tu(vuint32m1_t 
maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_tu( @@ -363,7 +363,7 @@ vuint32m1_t test_vredsum_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_tu( @@ -372,7 +372,7 @@ vuint32m1_t test_vredsum_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_tu( @@ -381,7 +381,7 @@ vuint64m1_t test_vredsum_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_tu( @@ -390,7 +390,7 @@ vuint64m1_t test_vredsum_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); + return 
__riscv_vredsum_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_tu( @@ -399,7 +399,7 @@ vuint64m1_t test_vredsum_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_tum( @@ -408,7 +408,7 @@ vuint64m1_t test_vredsum_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_tum( @@ -417,7 +417,7 @@ vint8m1_t test_vredsum_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_tum( @@ -426,7 +426,7 @@ vint8m1_t test_vredsum_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_tum( 
@@ -435,7 +435,7 @@ vint8m1_t test_vredsum_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_tum( @@ -444,7 +444,7 @@ vint8m1_t test_vredsum_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_tum( @@ -453,7 +453,7 @@ vint8m1_t test_vredsum_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_tum( @@ -462,7 +462,7 @@ vint8m1_t test_vredsum_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredsum_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vredsum_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_tum( @@ -480,7 +480,7 @@ vint16m1_t test_vredsum_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_tum( @@ -489,7 +489,7 @@ vint16m1_t test_vredsum_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_tum( @@ -498,7 +498,7 @@ vint16m1_t test_vredsum_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_tum( @@ -507,7 +507,7 @@ vint16m1_t test_vredsum_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vredsum_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_tum( @@ -516,7 +516,7 @@ vint16m1_t test_vredsum_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredsum_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tum( @@ -525,7 +525,7 @@ vint16m1_t test_vredsum_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_tum( @@ -534,7 +534,7 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_tum( @@ -543,7 +543,7 @@ vint32m1_t test_vredsum_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, 
vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_tum( @@ -552,7 +552,7 @@ vint32m1_t test_vredsum_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_tum( @@ -561,7 +561,7 @@ vint32m1_t test_vredsum_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredsum_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_tum( @@ -570,7 +570,7 @@ vint32m1_t test_vredsum_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_tum( @@ -579,7 +579,7 @@ vint64m1_t test_vredsum_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return 
vredsum_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_tum( @@ -588,7 +588,7 @@ vint64m1_t test_vredsum_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_tum( @@ -597,7 +597,7 @@ vint64m1_t test_vredsum_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredsum_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_tum( @@ -606,7 +606,7 @@ vint64m1_t test_vredsum_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_tum( @@ -615,7 +615,7 @@ vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return 
__riscv_vredsum_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_tum( @@ -624,7 +624,7 @@ vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_tum( @@ -633,7 +633,7 @@ vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_tum( @@ -642,7 +642,7 @@ vuint8m1_t test_vredsum_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_tum( @@ -651,7 +651,7 @@ vuint8m1_t test_vredsum_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredsum_vs_u8m8_u8m1_tum( @@ -660,7 +660,7 @@ vuint8m1_t test_vredsum_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredsum_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_tum( @@ -669,7 +669,7 @@ vuint8m1_t test_vredsum_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_tum( @@ -678,7 +678,7 @@ vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_tum( @@ -687,7 +687,7 @@ vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_tum( @@ -696,7 +696,7 @@ 
vuint16m1_t test_vredsum_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_tum( @@ -705,7 +705,7 @@ vuint16m1_t test_vredsum_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_tum( @@ -714,7 +714,7 @@ vuint16m1_t test_vredsum_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredsum_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tum( @@ -723,7 +723,7 @@ vuint16m1_t test_vredsum_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_tum( @@ -732,7 +732,7 @@ vuint32m1_t 
test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_tum( @@ -741,7 +741,7 @@ vuint32m1_t test_vredsum_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_tum( @@ -750,7 +750,7 @@ vuint32m1_t test_vredsum_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_tum( @@ -759,7 +759,7 @@ vuint32m1_t test_vredsum_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredsum_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_tum( @@ -768,7 +768,7 @@ vuint32m1_t test_vredsum_vs_u32m8_u32m1_tum(vbool4_t mask, 
vuint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_tum( @@ -777,7 +777,7 @@ vuint64m1_t test_vredsum_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_tum( @@ -786,7 +786,7 @@ vuint64m1_t test_vredsum_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_tum( @@ -795,6 +795,6 @@ vuint64m1_t test_vredsum_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredsum_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredsum_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredxor.c 
index 01ca0d115c63..17cb520c35aa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vredxor.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_tu( @@ -21,7 +21,7 @@ vint8m1_t test_vredxor_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_tu( @@ -30,7 +30,7 @@ vint8m1_t test_vredxor_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_tu( @@ -39,7 +39,7 @@ vint8m1_t test_vredxor_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_tu( @@ -48,7 +48,7 @@ vint8m1_t test_vredxor_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vi // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_tu( @@ -57,7 +57,7 @@ vint8m1_t test_vredxor_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_tu( @@ -66,7 +66,7 @@ vint8m1_t test_vredxor_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vredxor_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_tu( @@ -84,7 +84,7 @@ vint16m1_t test_vredxor_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); + return 
__riscv_vredxor_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_tu( @@ -93,7 +93,7 @@ vint16m1_t test_vredxor_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_tu( @@ -102,7 +102,7 @@ vint16m1_t test_vredxor_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_tu( @@ -111,7 +111,7 @@ vint16m1_t test_vredxor_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_tu( @@ -120,7 +120,7 @@ vint16m1_t test_vredxor_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tu( @@ -129,7 +129,7 @@ vint16m1_t test_vredxor_vs_i16m8_i16m1_tu(vint16m1_t 
maskedoff, vint16m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_tu( @@ -138,7 +138,7 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vec // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_tu( @@ -147,7 +147,7 @@ vint32m1_t test_vredxor_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_tu( @@ -156,7 +156,7 @@ vint32m1_t test_vredxor_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_tu( @@ -165,7 +165,7 @@ vint32m1_t test_vredxor_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return 
vredxor_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_tu( @@ -174,7 +174,7 @@ vint32m1_t test_vredxor_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_tu( @@ -183,7 +183,7 @@ vint64m1_t test_vredxor_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_tu( @@ -192,7 +192,7 @@ vint64m1_t test_vredxor_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_tu( @@ -201,7 +201,7 @@ vint64m1_t test_vredxor_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_tu( @@ -210,7 
+210,7 @@ vint64m1_t test_vredxor_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_tu( @@ -219,7 +219,7 @@ vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_tu( @@ -228,7 +228,7 @@ vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_tu( @@ -237,7 +237,7 @@ vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vecto // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_tu( @@ -246,7 +246,7 @@ vuint8m1_t test_vredxor_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, 
vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_tu( @@ -255,7 +255,7 @@ vuint8m1_t test_vredxor_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_tu( @@ -264,7 +264,7 @@ vuint8m1_t test_vredxor_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_tu( @@ -273,7 +273,7 @@ vuint8m1_t test_vredxor_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_tu( @@ -282,7 +282,7 @@ vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vredxor_vs_u16m1_u16m1_tu( @@ -291,7 +291,7 @@ vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_tu( @@ -300,7 +300,7 @@ vuint16m1_t test_vredxor_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_tu( @@ -309,7 +309,7 @@ vuint16m1_t test_vredxor_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_tu( @@ -318,7 +318,7 @@ vuint16m1_t test_vredxor_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tu( @@ -327,7 +327,7 @@ vuint16m1_t test_vredxor_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_tu( @@ -336,7 +336,7 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_tu( @@ -345,7 +345,7 @@ vuint32m1_t test_vredxor_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_tu( @@ -354,7 +354,7 @@ vuint32m1_t test_vredxor_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_tu( @@ -363,7 +363,7 @@ vuint32m1_t test_vredxor_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); + 
return __riscv_vredxor_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_tu( @@ -372,7 +372,7 @@ vuint32m1_t test_vredxor_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_tu( @@ -381,7 +381,7 @@ vuint64m1_t test_vredxor_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_tu( @@ -390,7 +390,7 @@ vuint64m1_t test_vredxor_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_tu( @@ -399,7 +399,7 @@ vuint64m1_t test_vredxor_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_tum( @@ -408,7 +408,7 @@ vuint64m1_t 
test_vredxor_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_tum( @@ -417,7 +417,7 @@ vint8m1_t test_vredxor_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_tum( @@ -426,7 +426,7 @@ vint8m1_t test_vredxor_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_tum( @@ -435,7 +435,7 @@ vint8m1_t test_vredxor_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_tum( @@ -444,7 +444,7 @@ vint8m1_t test_vredxor_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m1_t test_vredxor_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_tum( @@ -453,7 +453,7 @@ vint8m1_t test_vredxor_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_tum( @@ -462,7 +462,7 @@ vint8m1_t test_vredxor_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return vredxor_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vredxor_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_tum( @@ -480,7 +480,7 @@ vint16m1_t test_vredxor_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, 
vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_tum( @@ -489,7 +489,7 @@ vint16m1_t test_vredxor_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_tum( @@ -498,7 +498,7 @@ vint16m1_t test_vredxor_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_tum( @@ -507,7 +507,7 @@ vint16m1_t test_vredxor_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_tum( @@ -516,7 +516,7 @@ vint16m1_t test_vredxor_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return vredxor_vs_i16m8_i16m1_tum(mask, 
maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tum( @@ -525,7 +525,7 @@ vint16m1_t test_vredxor_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_tum( @@ -534,7 +534,7 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_tum( @@ -543,7 +543,7 @@ vint32m1_t test_vredxor_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_tum( @@ -552,7 +552,7 @@ vint32m1_t test_vredxor_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return 
__riscv_vredxor_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_tum( @@ -561,7 +561,7 @@ vint32m1_t test_vredxor_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return vredxor_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_tum( @@ -570,7 +570,7 @@ vint32m1_t test_vredxor_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_tum( @@ -579,7 +579,7 @@ vint64m1_t test_vredxor_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_tum( @@ -588,7 +588,7 @@ vint64m1_t test_vredxor_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); } 
// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_tum( @@ -597,7 +597,7 @@ vint64m1_t test_vredxor_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return vredxor_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_tum( @@ -606,7 +606,7 @@ vint64m1_t test_vredxor_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_tum( @@ -615,7 +615,7 @@ vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_tum( @@ -624,7 +624,7 @@ vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_tum( @@ -633,7 +633,7 @@ vuint8m1_t 
test_vredxor_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_tum( @@ -642,7 +642,7 @@ vuint8m1_t test_vredxor_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_tum( @@ -651,7 +651,7 @@ vuint8m1_t test_vredxor_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_tum( @@ -660,7 +660,7 @@ vuint8m1_t test_vredxor_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return vredxor_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_tum( @@ -669,7 +669,7 @@ vuint8m1_t test_vredxor_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_tum( @@ -678,7 +678,7 @@ vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_tum( @@ -687,7 +687,7 @@ vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_tum( @@ -696,7 +696,7 @@ vuint16m1_t test_vredxor_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_tum( @@ -705,7 +705,7 @@ vuint16m1_t test_vredxor_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vredxor_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_tum( @@ -714,7 +714,7 @@ vuint16m1_t test_vredxor_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return vredxor_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tum( @@ -723,7 +723,7 @@ vuint16m1_t test_vredxor_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_tum( @@ -732,7 +732,7 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_tum( @@ -741,7 +741,7 @@ vuint32m1_t test_vredxor_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m2_u32m1_tum(vbool16_t mask, 
vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_tum( @@ -750,7 +750,7 @@ vuint32m1_t test_vredxor_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_tum( @@ -759,7 +759,7 @@ vuint32m1_t test_vredxor_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return vredxor_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_tum( @@ -768,7 +768,7 @@ vuint32m1_t test_vredxor_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_tum( @@ -777,7 +777,7 @@ vuint64m1_t test_vredxor_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t 
scalar, size_t vl) { - return vredxor_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_tum( @@ -786,7 +786,7 @@ vuint64m1_t test_vredxor_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_tum( @@ -795,6 +795,6 @@ vuint64m1_t test_vredxor_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return vredxor_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vredxor_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrem.c index fed8de03f6dd..cdacc386048e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrem.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrem.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vrem_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vrem_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf8_t test_vrem_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vrem_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vrem_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vrem_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vrem_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vrem_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vrem_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vrem_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vv_i8m1_tu(vint8m1_t 
maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vrem_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vrem_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vrem_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vrem_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vrem_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vrem_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vrem_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vrem_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return 
vrem_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vrem_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vrem_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vrem_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vrem_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vrem_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vrem_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vrem_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return 
vrem_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vrem_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vrem_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vrem_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vrem_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vrem_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vrem_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vrem_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return 
vrem_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vrem_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vrem_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vrem_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vrem_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vrem_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vrem_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vrem_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return 
vrem_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vrem_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vrem_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vrem_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vrem_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vrem_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vrem_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vrem_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return 
vrem_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vrem_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vrem_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vrem_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vrem_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vrem_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vrem_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vrem_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return 
vrem_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vrem_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vrem_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vrem_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vrem_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vrem_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vrem_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vrem_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m4_tu(maskedoff, 
op1, op2, vl); + return __riscv_vrem_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vrem_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vrem_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vrem_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_tum( @@ -408,7 +408,7 @@ vint64m8_t test_vrem_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vrem_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vrem_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_tum( @@ -426,7 +426,7 @@ vint8mf8_t test_vrem_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t 
op2, size_t vl) { - return vrem_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_tum( @@ -435,7 +435,7 @@ vint8mf4_t test_vrem_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_tum( @@ -444,7 +444,7 @@ vint8mf4_t test_vrem_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vrem_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_tum( @@ -453,7 +453,7 @@ vint8mf2_t test_vrem_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m1_tum( @@ -462,7 +462,7 @@ vint8mf2_t test_vrem_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vrem_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vrem_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m2_tum( @@ -480,7 +480,7 @@ vint8m1_t test_vrem_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vrem_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m2_tum( @@ -489,7 +489,7 @@ vint8m2_t test_vrem_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m4_tum( @@ -498,7 +498,7 @@ vint8m2_t test_vrem_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vrem_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m4_tum( @@ -507,7 +507,7 @@ vint8m4_t test_vrem_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m8_tum( @@ -516,7 +516,7 
@@ vint8m4_t test_vrem_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vrem_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m8_tum( @@ -525,7 +525,7 @@ vint8m8_t test_vrem_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_tum( @@ -534,7 +534,7 @@ vint8m8_t test_vrem_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vrem_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_tum( @@ -543,7 +543,7 @@ vint16mf4_t test_vrem_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_tum( @@ -552,7 +552,7 @@ vint16mf4_t test_vrem_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vrem_vv_i16mf2_tum(mask, maskedoff, op1, 
op2, vl); + return __riscv_vrem_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_tum( @@ -561,7 +561,7 @@ vint16mf2_t test_vrem_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m1_tum( @@ -570,7 +570,7 @@ vint16mf2_t test_vrem_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vrem_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m1_tum( @@ -579,7 +579,7 @@ vint16m1_t test_vrem_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m2_tum( @@ -588,7 +588,7 @@ vint16m1_t test_vrem_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vrem_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m2_tum( @@ -597,7 +597,7 @@ vint16m2_t test_vrem_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vrem_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m4_tum( @@ -606,7 +606,7 @@ vint16m2_t test_vrem_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vrem_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m4_tum( @@ -615,7 +615,7 @@ vint16m4_t test_vrem_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m8_tum( @@ -624,7 +624,7 @@ vint16m4_t test_vrem_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vrem_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m8_tum( @@ -633,7 +633,7 @@ vint16m8_t test_vrem_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tum( @@ -642,7 +642,7 @@ 
vint16m8_t test_vrem_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vrem_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tum( @@ -651,7 +651,7 @@ vint32mf2_t test_vrem_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m1_tum( @@ -660,7 +660,7 @@ vint32mf2_t test_vrem_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vrem_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m1_tum( @@ -669,7 +669,7 @@ vint32m1_t test_vrem_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m2_tum( @@ -678,7 +678,7 @@ vint32m1_t test_vrem_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vrem_vv_i32m2_tum(mask, maskedoff, 
op1, op2, vl); + return __riscv_vrem_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m2_tum( @@ -687,7 +687,7 @@ vint32m2_t test_vrem_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m4_tum( @@ -696,7 +696,7 @@ vint32m2_t test_vrem_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vrem_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m4_tum( @@ -705,7 +705,7 @@ vint32m4_t test_vrem_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m8_tum( @@ -714,7 +714,7 @@ vint32m4_t test_vrem_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vrem_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m8_tum( @@ -723,7 +723,7 @@ vint32m8_t test_vrem_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vrem_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m1_tum( @@ -732,7 +732,7 @@ vint32m8_t test_vrem_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vrem_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m1_tum( @@ -741,7 +741,7 @@ vint64m1_t test_vrem_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m2_tum( @@ -750,7 +750,7 @@ vint64m1_t test_vrem_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vrem_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m2_tum( @@ -759,7 +759,7 @@ vint64m2_t test_vrem_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m4_tum( @@ -768,7 +768,7 
@@ vint64m2_t test_vrem_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vrem_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m4_tum( @@ -777,7 +777,7 @@ vint64m4_t test_vrem_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m8_tum( @@ -786,7 +786,7 @@ vint64m4_t test_vrem_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vrem_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m8_tum( @@ -795,7 +795,7 @@ vint64m8_t test_vrem_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_tumu( @@ -804,7 +804,7 @@ vint64m8_t test_vrem_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vrem_vv_i8mf8_tumu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vrem_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vrem_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_tumu( @@ -822,7 +822,7 @@ vint8mf8_t test_vrem_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vrem_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_tumu( @@ -831,7 +831,7 @@ vint8mf4_t test_vrem_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_tumu( @@ -840,7 +840,7 @@ vint8mf4_t test_vrem_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vrem_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_tumu( @@ -849,7 +849,7 @@ vint8mf2_t test_vrem_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vrem_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m1_tumu( @@ -858,7 +858,7 @@ vint8mf2_t test_vrem_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vrem_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m1_tumu( @@ -867,7 +867,7 @@ vint8m1_t test_vrem_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m2_tumu( @@ -876,7 +876,7 @@ vint8m1_t test_vrem_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vrem_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m2_tumu( @@ -885,7 +885,7 @@ vint8m2_t test_vrem_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m4_tumu( @@ -894,7 +894,7 @@ vint8m2_t 
test_vrem_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vrem_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m4_tumu( @@ -903,7 +903,7 @@ vint8m4_t test_vrem_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m8_tumu( @@ -912,7 +912,7 @@ vint8m4_t test_vrem_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vrem_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m8_tumu( @@ -921,7 +921,7 @@ vint8m8_t test_vrem_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_tumu( @@ -930,7 +930,7 @@ vint8m8_t test_vrem_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vrem_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vrem_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_tumu( @@ -939,7 +939,7 @@ vint16mf4_t test_vrem_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_tumu( @@ -948,7 +948,7 @@ vint16mf4_t test_vrem_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vrem_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_tumu( @@ -957,7 +957,7 @@ vint16mf2_t test_vrem_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m1_tumu( @@ -966,7 +966,7 @@ vint16mf2_t test_vrem_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vrem_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m1_tumu( @@ -975,7 +975,7 @@ vint16m1_t test_vrem_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m1_t test_vrem_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m2_tumu( @@ -984,7 +984,7 @@ vint16m1_t test_vrem_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vrem_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m2_tumu( @@ -993,7 +993,7 @@ vint16m2_t test_vrem_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m4_tumu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vrem_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vrem_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m4_tumu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vrem_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrem_vv_i16m8_tumu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vrem_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vrem_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m8_tumu( @@ -1029,7 +1029,7 @@ vint16m8_t test_vrem_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_tumu( @@ -1038,7 +1038,7 @@ vint16m8_t test_vrem_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vrem_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_tumu( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vrem_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m1_tumu( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vrem_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, 
vint32m1_t op2, size_t vl) { - return vrem_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m1_tumu( @@ -1065,7 +1065,7 @@ vint32m1_t test_vrem_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m2_tumu( @@ -1074,7 +1074,7 @@ vint32m1_t test_vrem_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vrem_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m2_tumu( @@ -1083,7 +1083,7 @@ vint32m2_t test_vrem_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m4_tumu( @@ -1092,7 +1092,7 @@ vint32m2_t test_vrem_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vrem_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m4_tumu( @@ -1101,7 +1101,7 @@ vint32m4_t 
test_vrem_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m8_tumu( @@ -1110,7 +1110,7 @@ vint32m4_t test_vrem_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vrem_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m8_tumu( @@ -1119,7 +1119,7 @@ vint32m8_t test_vrem_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m1_tumu( @@ -1128,7 +1128,7 @@ vint32m8_t test_vrem_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vrem_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m1_tumu( @@ -1137,7 +1137,7 @@ vint64m1_t test_vrem_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m1_tumu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vrem_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m2_tumu( @@ -1146,7 +1146,7 @@ vint64m1_t test_vrem_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vrem_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m2_tumu( @@ -1155,7 +1155,7 @@ vint64m2_t test_vrem_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m4_tumu( @@ -1164,7 +1164,7 @@ vint64m2_t test_vrem_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vrem_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m4_tumu( @@ -1173,7 +1173,7 @@ vint64m4_t test_vrem_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m8_tumu( @@ -1182,7 +1182,7 @@ vint64m4_t test_vrem_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m8_t test_vrem_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vrem_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m8_tumu( @@ -1191,7 +1191,7 @@ vint64m8_t test_vrem_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_mu( @@ -1200,7 +1200,7 @@ vint64m8_t test_vrem_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vrem_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vrem_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_mu( @@ -1218,7 +1218,7 @@ vint8mf8_t test_vrem_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vrem_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_mu( @@ 
-1227,7 +1227,7 @@ vint8mf4_t test_vrem_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_mu( @@ -1236,7 +1236,7 @@ vint8mf4_t test_vrem_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vrem_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_mu( @@ -1245,7 +1245,7 @@ vint8mf2_t test_vrem_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m1_mu( @@ -1254,7 +1254,7 @@ vint8mf2_t test_vrem_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vrem_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m1_mu( @@ -1263,7 +1263,7 @@ vint8m1_t test_vrem_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vrem_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m2_mu( @@ -1272,7 +1272,7 @@ vint8m1_t test_vrem_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vrem_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m2_mu( @@ -1281,7 +1281,7 @@ vint8m2_t test_vrem_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m4_mu( @@ -1290,7 +1290,7 @@ vint8m2_t test_vrem_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vrem_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m4_mu( @@ -1299,7 +1299,7 @@ vint8m4_t test_vrem_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i8m8_mu( @@ -1308,7 +1308,7 @@ vint8m4_t test_vrem_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, 
size_t vl) { - return vrem_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i8m8_mu( @@ -1317,7 +1317,7 @@ vint8m8_t test_vrem_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrem_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_mu( @@ -1326,7 +1326,7 @@ vint8m8_t test_vrem_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vrem_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_mu( @@ -1335,7 +1335,7 @@ vint16mf4_t test_vrem_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_mu( @@ -1344,7 +1344,7 @@ vint16mf4_t test_vrem_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vrem_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_mu( @@ -1353,7 +1353,7 @@ vint16mf2_t test_vrem_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m1_mu( @@ -1362,7 +1362,7 @@ vint16mf2_t test_vrem_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vrem_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m1_mu( @@ -1371,7 +1371,7 @@ vint16m1_t test_vrem_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m2_mu( @@ -1380,7 +1380,7 @@ vint16m1_t test_vrem_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vrem_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m2_mu( @@ -1389,7 +1389,7 @@ vint16m2_t test_vrem_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrem_vv_i16m4_mu( @@ -1398,7 +1398,7 @@ vint16m2_t test_vrem_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vrem_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m4_mu( @@ -1407,7 +1407,7 @@ vint16m4_t test_vrem_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m8_mu( @@ -1416,7 +1416,7 @@ vint16m4_t test_vrem_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vrem_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m8_mu( @@ -1425,7 +1425,7 @@ vint16m8_t test_vrem_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrem_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_mu( @@ -1434,7 +1434,7 @@ vint16m8_t test_vrem_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return 
vrem_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_mu( @@ -1443,7 +1443,7 @@ vint32mf2_t test_vrem_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m1_mu( @@ -1452,7 +1452,7 @@ vint32mf2_t test_vrem_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vrem_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m1_mu( @@ -1461,7 +1461,7 @@ vint32m1_t test_vrem_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m2_mu( @@ -1470,7 +1470,7 @@ vint32m1_t test_vrem_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vrem_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m2_mu( @@ -1479,7 +1479,7 @@ vint32m2_t test_vrem_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m2_t test_vrem_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m4_mu( @@ -1488,7 +1488,7 @@ vint32m2_t test_vrem_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vrem_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m4_mu( @@ -1497,7 +1497,7 @@ vint32m4_t test_vrem_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m8_mu( @@ -1506,7 +1506,7 @@ vint32m4_t test_vrem_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vrem_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m8_mu( @@ -1515,7 +1515,7 @@ vint32m8_t test_vrem_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrem_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m1_mu( @@ 
-1524,7 +1524,7 @@ vint32m8_t test_vrem_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vrem_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m1_mu( @@ -1533,7 +1533,7 @@ vint64m1_t test_vrem_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m2_mu( @@ -1542,7 +1542,7 @@ vint64m1_t test_vrem_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vrem_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m2_mu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vrem_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m4_mu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vrem_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vrem_vv_i64m4_mu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vrem_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m4_mu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vrem_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m8_mu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vrem_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vrem_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m8_mu( @@ -1587,6 +1587,6 @@ vint64m8_t test_vrem_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vrem_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrem_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vremu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vremu.c index 774d80da542a..7b1373e3082d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vremu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vremu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vremu_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return 
__riscv_vremu_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vremu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_tu( @@ -30,7 +30,7 @@ vuint8mf8_t test_vremu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vremu_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_tu( @@ -39,7 +39,7 @@ vuint8mf4_t test_vremu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_tu( @@ -48,7 +48,7 @@ vuint8mf4_t test_vremu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vremu_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_tu( @@ -57,7 +57,7 @@ vuint8mf2_t test_vremu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return 
__riscv_vremu_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m1_tu( @@ -66,7 +66,7 @@ vuint8mf2_t test_vremu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vremu_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vremu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m2_tu( @@ -84,7 +84,7 @@ vuint8m1_t test_vremu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vremu_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m2_tu( @@ -93,7 +93,7 @@ vuint8m2_t test_vremu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m4_tu( @@ -102,7 +102,7 @@ vuint8m2_t test_vremu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vremu_vv_u8m4_tu(maskedoff, op1, op2, vl); + return 
__riscv_vremu_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m4_tu( @@ -111,7 +111,7 @@ vuint8m4_t test_vremu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m8_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vremu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vremu_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m8_tu( @@ -129,7 +129,7 @@ vuint8m8_t test_vremu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_tu( @@ -138,7 +138,7 @@ vuint8m8_t test_vremu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vremu_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_tu( @@ -147,7 +147,7 @@ vuint16mf4_t test_vremu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return 
__riscv_vremu_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_tu( @@ -156,7 +156,7 @@ vuint16mf4_t test_vremu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vremu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_tu( @@ -165,7 +165,7 @@ vuint16mf2_t test_vremu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m1_tu( @@ -174,7 +174,7 @@ vuint16mf2_t test_vremu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vremu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m1_tu( @@ -183,7 +183,7 @@ vuint16m1_t test_vremu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m2_tu( @@ -192,7 +192,7 @@ vuint16m1_t test_vremu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return 
vremu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m2_tu( @@ -201,7 +201,7 @@ vuint16m2_t test_vremu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m4_tu( @@ -210,7 +210,7 @@ vuint16m2_t test_vremu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vremu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m4_tu( @@ -219,7 +219,7 @@ vuint16m4_t test_vremu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m8_tu( @@ -228,7 +228,7 @@ vuint16m4_t test_vremu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vremu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m8_tu( @@ -237,7 +237,7 @@ vuint16m8_t test_vremu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) 
{ - return vremu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tu( @@ -246,7 +246,7 @@ vuint16m8_t test_vremu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vremu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tu( @@ -255,7 +255,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m1_tu( @@ -264,7 +264,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vremu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m1_tu( @@ -273,7 +273,7 @@ vuint32m1_t test_vremu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m2_tu( @@ -282,7 +282,7 @@ vuint32m1_t test_vremu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t 
op1, vuint32m2_t op2, size_t vl) { - return vremu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m2_tu( @@ -291,7 +291,7 @@ vuint32m2_t test_vremu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m4_tu( @@ -300,7 +300,7 @@ vuint32m2_t test_vremu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vremu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m4_tu( @@ -309,7 +309,7 @@ vuint32m4_t test_vremu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m8_tu( @@ -318,7 +318,7 @@ vuint32m4_t test_vremu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vremu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m8_tu( @@ -327,7 +327,7 @@ vuint32m8_t test_vremu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8_tu(vuint32m8_t 
maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m1_tu( @@ -336,7 +336,7 @@ vuint32m8_t test_vremu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vremu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m1_tu( @@ -345,7 +345,7 @@ vuint64m1_t test_vremu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m2_tu( @@ -354,7 +354,7 @@ vuint64m1_t test_vremu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vremu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m2_tu( @@ -363,7 +363,7 @@ vuint64m2_t test_vremu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m4_tu( @@ -372,7 +372,7 @@ vuint64m2_t test_vremu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vremu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vremu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m4_tu( @@ -381,7 +381,7 @@ vuint64m4_t test_vremu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m8_tu( @@ -390,7 +390,7 @@ vuint64m4_t test_vremu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vremu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m8_t test_vremu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t test_vremu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vremu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_tum( @@ -417,7 +417,7 @@ vuint8mf8_t test_vremu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_tum( @@ -426,7 +426,7 @@ vuint8mf8_t test_vremu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vremu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_tum( @@ -435,7 +435,7 @@ vuint8mf4_t test_vremu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_tum( @@ -444,7 +444,7 @@ vuint8mf4_t test_vremu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vremu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_tum( @@ -453,7 +453,7 @@ vuint8mf2_t test_vremu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8mf2_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m1_tum( @@ -462,7 +462,7 @@ vuint8mf2_t test_vremu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vremu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m1_tum( @@ -471,7 +471,7 @@ vuint8m1_t test_vremu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m2_tum( @@ -480,7 +480,7 @@ vuint8m1_t test_vremu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vremu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m2_tum( @@ -489,7 +489,7 @@ vuint8m2_t test_vremu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m4_tum( @@ -498,7 +498,7 @@ vuint8m2_t test_vremu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, 
vuint8m4_t op2, size_t vl) { - return vremu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m4_tum( @@ -507,7 +507,7 @@ vuint8m4_t test_vremu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m8_tum( @@ -516,7 +516,7 @@ vuint8m4_t test_vremu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vremu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m8_tum( @@ -525,7 +525,7 @@ vuint8m8_t test_vremu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_tum( @@ -534,7 +534,7 @@ vuint8m8_t test_vremu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vremu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_tum( @@ -543,7 +543,7 @@ vuint16mf4_t test_vremu_vv_u16mf4_tum(vbool64_t mask, 
vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_tum( @@ -552,7 +552,7 @@ vuint16mf4_t test_vremu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vremu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_tum( @@ -561,7 +561,7 @@ vuint16mf2_t test_vremu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m1_tum( @@ -570,7 +570,7 @@ vuint16mf2_t test_vremu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vremu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m1_tum( @@ -579,7 +579,7 @@ vuint16m1_t test_vremu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m1_tum(mask, maskedoff, op1, op2, 
vl); + return __riscv_vremu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m2_tum( @@ -588,7 +588,7 @@ vuint16m1_t test_vremu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vremu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m2_tum( @@ -597,7 +597,7 @@ vuint16m2_t test_vremu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m4_tum( @@ -606,7 +606,7 @@ vuint16m2_t test_vremu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vremu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m4_tum( @@ -615,7 +615,7 @@ vuint16m4_t test_vremu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m8_tum( @@ -624,7 +624,7 @@ vuint16m4_t test_vremu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m8_t test_vremu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vremu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m8_tum( @@ -633,7 +633,7 @@ vuint16m8_t test_vremu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tum( @@ -642,7 +642,7 @@ vuint16m8_t test_vremu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vremu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tum( @@ -651,7 +651,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m1_tum( @@ -660,7 +660,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vremu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m1_tum(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m1_tum( @@ -669,7 +669,7 @@ vuint32m1_t test_vremu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m2_tum( @@ -678,7 +678,7 @@ vuint32m1_t test_vremu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vremu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m2_tum( @@ -687,7 +687,7 @@ vuint32m2_t test_vremu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m4_tum( @@ -696,7 +696,7 @@ vuint32m2_t test_vremu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vremu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m4_tum( @@ -705,7 +705,7 @@ vuint32m4_t test_vremu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t 
maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m8_tum( @@ -714,7 +714,7 @@ vuint32m4_t test_vremu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vremu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m8_tum( @@ -723,7 +723,7 @@ vuint32m8_t test_vremu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m1_tum( @@ -732,7 +732,7 @@ vuint32m8_t test_vremu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vremu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m1_tum( @@ -741,7 +741,7 @@ vuint64m1_t test_vremu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m2_tum( @@ -750,7 +750,7 @@ vuint64m1_t 
test_vremu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vremu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m2_tum( @@ -759,7 +759,7 @@ vuint64m2_t test_vremu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m4_tum( @@ -768,7 +768,7 @@ vuint64m2_t test_vremu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vremu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m4_tum( @@ -777,7 +777,7 @@ vuint64m4_t test_vremu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m8_tum( @@ -786,7 +786,7 @@ vuint64m4_t test_vremu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vremu_vv_u64m8_tum(mask, 
maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m8_t test_vremu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vremu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vremu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_tumu( @@ -813,7 +813,7 @@ vuint8mf8_t test_vremu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_tumu( @@ -822,7 +822,7 @@ vuint8mf8_t test_vremu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vremu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_tumu( @@ -831,7 +831,7 @@ vuint8mf4_t test_vremu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_tumu( @@ -840,7 +840,7 @@ vuint8mf4_t test_vremu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vremu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_tumu( @@ -849,7 +849,7 @@ vuint8mf2_t test_vremu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m1_tumu( @@ -858,7 +858,7 @@ vuint8mf2_t test_vremu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vremu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m1_tumu( @@ -867,7 +867,7 @@ vuint8m1_t test_vremu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m1_tumu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m2_tumu( @@ -876,7 +876,7 @@ vuint8m1_t test_vremu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vremu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m2_tumu( @@ -885,7 +885,7 @@ vuint8m2_t test_vremu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m4_tumu( @@ -894,7 +894,7 @@ vuint8m2_t test_vremu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vremu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m4_tumu( @@ -903,7 +903,7 @@ vuint8m4_t test_vremu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m8_tumu( @@ -912,7 +912,7 @@ vuint8m4_t test_vremu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t 
maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vremu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m8_tumu( @@ -921,7 +921,7 @@ vuint8m8_t test_vremu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_tumu( @@ -930,7 +930,7 @@ vuint8m8_t test_vremu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vremu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_tumu( @@ -939,7 +939,7 @@ vuint16mf4_t test_vremu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_tumu( @@ -948,7 +948,7 @@ vuint16mf4_t test_vremu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vremu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vremu_vx_u16mf2_tumu( @@ -957,7 +957,7 @@ vuint16mf2_t test_vremu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m1_tumu( @@ -966,7 +966,7 @@ vuint16mf2_t test_vremu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vremu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m1_tumu( @@ -975,7 +975,7 @@ vuint16m1_t test_vremu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m2_tumu( @@ -984,7 +984,7 @@ vuint16m1_t test_vremu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vremu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m2_tumu( @@ -993,7 +993,7 @@ vuint16m2_t test_vremu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, 
vuint16m2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m4_tumu( @@ -1002,7 +1002,7 @@ vuint16m2_t test_vremu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vremu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m4_tumu( @@ -1011,7 +1011,7 @@ vuint16m4_t test_vremu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m8_tumu( @@ -1020,7 +1020,7 @@ vuint16m4_t test_vremu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vremu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m8_tumu( @@ -1029,7 +1029,7 @@ vuint16m8_t test_vremu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_tumu( @@ -1038,7 
+1038,7 @@ vuint16m8_t test_vremu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vremu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_tumu( @@ -1047,7 +1047,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m1_tumu( @@ -1056,7 +1056,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vremu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m1_tumu( @@ -1065,7 +1065,7 @@ vuint32m1_t test_vremu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m2_tumu( @@ -1074,7 +1074,7 @@ vuint32m1_t test_vremu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, 
vuint32m2_t op2, size_t vl) { - return vremu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m2_tumu( @@ -1083,7 +1083,7 @@ vuint32m2_t test_vremu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m4_tumu( @@ -1092,7 +1092,7 @@ vuint32m2_t test_vremu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vremu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m4_tumu( @@ -1101,7 +1101,7 @@ vuint32m4_t test_vremu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m8_tumu( @@ -1110,7 +1110,7 @@ vuint32m4_t test_vremu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vremu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m8_tumu( @@ -1119,7 +1119,7 @@ 
vuint32m8_t test_vremu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m1_tumu( @@ -1128,7 +1128,7 @@ vuint32m8_t test_vremu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vremu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m1_tumu( @@ -1137,7 +1137,7 @@ vuint64m1_t test_vremu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m2_tumu( @@ -1146,7 +1146,7 @@ vuint64m1_t test_vremu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vremu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m2_tumu( @@ -1155,7 +1155,7 @@ vuint64m2_t test_vremu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - 
return vremu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m4_tumu( @@ -1164,7 +1164,7 @@ vuint64m2_t test_vremu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vremu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m4_tumu( @@ -1173,7 +1173,7 @@ vuint64m4_t test_vremu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m8_tumu( @@ -1182,7 +1182,7 @@ vuint64m4_t test_vremu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vremu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m8_t test_vremu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t 
test_vremu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vremu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_mu( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vremu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_mu( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vremu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vremu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_mu( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vremu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_mu( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vremu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vremu_vv_u8mf2_mu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vremu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_mu( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vremu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m1_mu( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vremu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vremu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m1_mu( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vremu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m2_mu( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vremu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vremu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m2_mu( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vremu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vremu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m4_mu( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vremu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vremu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m4_mu( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vremu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m8_mu( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vremu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vremu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m8_mu( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vremu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vremu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_mu( @@ -1326,7 +1326,7 @@ 
vuint8m8_t test_vremu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vremu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_mu( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vremu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_mu( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vremu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vremu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_mu( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vremu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m1_mu( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vremu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - 
return vremu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m1_mu( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vremu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m2_mu( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vremu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vremu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m2_mu( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vremu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m4_mu( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vremu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vremu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m4_mu( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vremu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, 
vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m8_mu( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vremu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vremu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m8_mu( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vremu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vremu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_mu( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vremu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vremu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_mu( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vremu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m1_mu( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vremu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m1_mu( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vremu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m2_mu( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vremu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vremu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m2_mu( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vremu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m4_mu( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vremu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vremu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vremu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m4_mu( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vremu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m8_mu( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vremu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vremu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m8_mu( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vremu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vremu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m1_mu( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vremu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vremu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vremu_vx_u64m1_mu( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vremu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m2_mu( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vremu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vremu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m2_mu( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vremu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m4_mu( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vremu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vremu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m4_mu( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vremu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, 
size_t vl) { - return vremu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m8_mu( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vremu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vremu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vremu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vremu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vremu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c index 59c08fff511d..94546c65fd9e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_f16mf4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16mf4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t 
index, size_t vl) { - return vrgather_vx_f16mf4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16mf4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_tu( @@ -31,7 +31,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_f16mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_tu( @@ -40,7 +40,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_tu( @@ -49,7 +49,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_f16m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_tu( @@ -58,7 +58,7 @@ vfloat16m1_t test_vrgather_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_tu( @@ -67,7 +67,7 @@ vfloat16m1_t test_vrgather_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_f16m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_tu( @@ -76,7 +76,7 @@ vfloat16m2_t test_vrgather_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_tu( @@ -85,7 +85,7 @@ vfloat16m2_t test_vrgather_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_f16m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_tu( @@ -94,7 +94,7 @@ vfloat16m4_t test_vrgather_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_tu( @@ -103,7 +103,7 @@ vfloat16m4_t test_vrgather_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_f16m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: 
@test_vrgather_vx_f16m8_tu( @@ -112,7 +112,7 @@ vfloat16m8_t test_vrgather_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tu( @@ -121,7 +121,7 @@ vfloat16m8_t test_vrgather_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_f32mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tu( @@ -130,7 +130,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_tu( @@ -139,7 +139,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_f32m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_tu( @@ -148,7 +148,7 @@ vfloat32m1_t test_vrgather_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) { - return 
vrgather_vx_f32m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_tu( @@ -157,7 +157,7 @@ vfloat32m1_t test_vrgather_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_f32m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_tu( @@ -166,7 +166,7 @@ vfloat32m2_t test_vrgather_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_tu( @@ -175,7 +175,7 @@ vfloat32m2_t test_vrgather_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_f32m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_tu( @@ -184,7 +184,7 @@ vfloat32m4_t test_vrgather_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_tu( @@ -193,7 +193,7 @@ vfloat32m4_t test_vrgather_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m8_t test_vrgather_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_f32m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_tu( @@ -202,7 +202,7 @@ vfloat32m8_t test_vrgather_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_tu( @@ -211,7 +211,7 @@ vfloat32m8_t test_vrgather_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_f64m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_tu( @@ -220,7 +220,7 @@ vfloat64m1_t test_vrgather_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_tu( @@ -229,7 +229,7 @@ vfloat64m1_t test_vrgather_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_f64m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_tu( @@ -238,7 +238,7 @@ 
vfloat64m2_t test_vrgather_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_tu( @@ -247,7 +247,7 @@ vfloat64m2_t test_vrgather_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_f64m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_tu( @@ -256,7 +256,7 @@ vfloat64m4_t test_vrgather_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_tu( @@ -265,7 +265,7 @@ vfloat64m4_t test_vrgather_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_f64m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_tu( @@ -274,7 +274,7 @@ vfloat64m8_t test_vrgather_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m8_tu(maskedoff, op1, index, vl); + return 
__riscv_vrgather_vx_f64m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_tu( @@ -283,7 +283,7 @@ vfloat64m8_t test_vrgather_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather_vv_i8mf8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8mf8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_tu( @@ -292,7 +292,7 @@ vint8mf8_t test_vrgather_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8mf8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_tu( @@ -301,7 +301,7 @@ vint8mf8_t test_vrgather_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_i8mf4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8mf4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_tu( @@ -310,7 +310,7 @@ vint8mf4_t test_vrgather_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8mf4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_tu( @@ -319,7 +319,7 @@ vint8mf4_t test_vrgather_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t 
index, size_t vl) { - return vrgather_vv_i8mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_tu( @@ -328,7 +328,7 @@ vint8mf2_t test_vrgather_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_tu( @@ -337,7 +337,7 @@ vint8mf2_t test_vrgather_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather_vv_i8m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_tu( @@ -346,7 +346,7 @@ vint8m1_t test_vrgather_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_tu( @@ -355,7 +355,7 @@ vint8m1_t test_vrgather_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_i8m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_tu( @@ -364,7 +364,7 @@ vint8m2_t test_vrgather_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vrgather_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_tu( @@ -373,7 +373,7 @@ vint8m2_t test_vrgather_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_i8m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_tu( @@ -382,7 +382,7 @@ vint8m4_t test_vrgather_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_tu( @@ -391,7 +391,7 @@ vint8m4_t test_vrgather_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_i8m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_tu( @@ -400,7 +400,7 @@ vint8m8_t test_vrgather_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_tu( @@ -409,7 +409,7 @@ vint8m8_t test_vrgather_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t in 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_i16mf4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16mf4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_tu( @@ -418,7 +418,7 @@ vint16mf4_t test_vrgather_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16mf4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_tu( @@ -427,7 +427,7 @@ vint16mf4_t test_vrgather_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_i16mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_tu( @@ -436,7 +436,7 @@ vint16mf2_t test_vrgather_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_tu( @@ -445,7 +445,7 @@ vint16mf2_t test_vrgather_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_i16m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: 
@test_vrgather_vx_i16m1_tu( @@ -454,7 +454,7 @@ vint16m1_t test_vrgather_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_tu( @@ -463,7 +463,7 @@ vint16m1_t test_vrgather_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_i16m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_tu( @@ -472,7 +472,7 @@ vint16m2_t test_vrgather_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_tu( @@ -481,7 +481,7 @@ vint16m2_t test_vrgather_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_i16m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_tu( @@ -490,7 +490,7 @@ vint16m4_t test_vrgather_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m4_tu(maskedoff, op1, index, vl); + return 
__riscv_vrgather_vx_i16m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_tu( @@ -499,7 +499,7 @@ vint16m4_t test_vrgather_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_i16m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_tu( @@ -508,7 +508,7 @@ vint16m8_t test_vrgather_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tu( @@ -517,7 +517,7 @@ vint16m8_t test_vrgather_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_i32mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tu( @@ -526,7 +526,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_tu( @@ -535,7 +535,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t 
op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_i32m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_tu( @@ -544,7 +544,7 @@ vint32m1_t test_vrgather_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_tu( @@ -553,7 +553,7 @@ vint32m1_t test_vrgather_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_i32m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_tu( @@ -562,7 +562,7 @@ vint32m2_t test_vrgather_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_tu( @@ -571,7 +571,7 @@ vint32m2_t test_vrgather_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_i32m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_tu( @@ -580,7 +580,7 @@ vint32m4_t test_vrgather_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_tu( @@ -589,7 +589,7 @@ vint32m4_t test_vrgather_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_i32m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_tu( @@ -598,7 +598,7 @@ vint32m8_t test_vrgather_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_tu( @@ -607,7 +607,7 @@ vint32m8_t test_vrgather_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_i64m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_tu( @@ -616,7 +616,7 @@ vint64m1_t test_vrgather_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_tu( @@ -625,7 +625,7 @@ vint64m1_t 
test_vrgather_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_i64m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_tu( @@ -634,7 +634,7 @@ vint64m2_t test_vrgather_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_tu( @@ -643,7 +643,7 @@ vint64m2_t test_vrgather_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_i64m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_tu( @@ -652,7 +652,7 @@ vint64m4_t test_vrgather_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_tu( @@ -661,7 +661,7 @@ vint64m4_t test_vrgather_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_i64m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m8_tu(maskedoff, op1, index, 
vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_tu( @@ -670,7 +670,7 @@ vint64m8_t test_vrgather_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_tu( @@ -679,7 +679,7 @@ vint64m8_t test_vrgather_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather_vv_u8mf8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8mf8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_tu( @@ -688,7 +688,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8mf8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_tu( @@ -697,7 +697,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_u8mf4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8mf4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_tu( @@ -706,7 +706,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) { - return 
vrgather_vx_u8mf4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8mf4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_tu( @@ -715,7 +715,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather_vv_u8mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_tu( @@ -724,7 +724,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_tu( @@ -733,7 +733,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather_vv_u8m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_tu( @@ -742,7 +742,7 @@ vuint8m1_t test_vrgather_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_tu( @@ -751,7 +751,7 @@ vuint8m1_t test_vrgather_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vrgather_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_u8m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_tu( @@ -760,7 +760,7 @@ vuint8m2_t test_vrgather_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_tu( @@ -769,7 +769,7 @@ vuint8m2_t test_vrgather_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_u8m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_tu( @@ -778,7 +778,7 @@ vuint8m4_t test_vrgather_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_tu( @@ -787,7 +787,7 @@ vuint8m4_t test_vrgather_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_u8m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_tu( @@ -796,7 +796,7 @@ vuint8m8_t test_vrgather_vv_u8m8_tu(vuint8m8_t maskedoff, 
vuint8m8_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_tu( @@ -805,7 +805,7 @@ vuint8m8_t test_vrgather_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_u16mf4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16mf4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_tu( @@ -814,7 +814,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16mf4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_tu( @@ -823,7 +823,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_u16mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_tu( @@ -832,7 +832,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16mf2_tu(maskedoff, op1, index, vl); } // 
CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_tu( @@ -841,7 +841,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_u16m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_tu( @@ -850,7 +850,7 @@ vuint16m1_t test_vrgather_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_tu( @@ -859,7 +859,7 @@ vuint16m1_t test_vrgather_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_u16m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_tu( @@ -868,7 +868,7 @@ vuint16m2_t test_vrgather_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_tu( @@ -877,7 +877,7 @@ vuint16m2_t test_vrgather_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return 
vrgather_vv_u16m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_tu( @@ -886,7 +886,7 @@ vuint16m4_t test_vrgather_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_tu( @@ -895,7 +895,7 @@ vuint16m4_t test_vrgather_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_u16m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_tu( @@ -904,7 +904,7 @@ vuint16m8_t test_vrgather_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tu( @@ -913,7 +913,7 @@ vuint16m8_t test_vrgather_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_u32mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tu( @@ -922,7 +922,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32mf2_t test_vrgather_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32mf2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32mf2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_tu( @@ -931,7 +931,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_u32m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_tu( @@ -940,7 +940,7 @@ vuint32m1_t test_vrgather_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_tu( @@ -949,7 +949,7 @@ vuint32m1_t test_vrgather_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_u32m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_tu( @@ -958,7 +958,7 @@ vuint32m2_t test_vrgather_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_tu( @@ -967,7 +967,7 @@ vuint32m2_t 
test_vrgather_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_u32m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_tu( @@ -976,7 +976,7 @@ vuint32m4_t test_vrgather_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_tu( @@ -985,7 +985,7 @@ vuint32m4_t test_vrgather_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_u32m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_tu( @@ -994,7 +994,7 @@ vuint32m8_t test_vrgather_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_tu( @@ -1003,7 +1003,7 @@ vuint32m8_t test_vrgather_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_u64m1_tu(maskedoff, op1, index, vl); + return 
__riscv_vrgather_vv_u64m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_tu( @@ -1012,7 +1012,7 @@ vuint64m1_t test_vrgather_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m1_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m1_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_tu( @@ -1021,7 +1021,7 @@ vuint64m1_t test_vrgather_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_u64m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_tu( @@ -1030,7 +1030,7 @@ vuint64m2_t test_vrgather_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m2_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m2_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_tu( @@ -1039,7 +1039,7 @@ vuint64m2_t test_vrgather_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_u64m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_tu( @@ -1048,7 +1048,7 @@ vuint64m4_t test_vrgather_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vx_u64m4_tu(vuint64m4_t maskedoff, 
vuint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m4_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m4_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_tu( @@ -1057,7 +1057,7 @@ vuint64m4_t test_vrgather_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_u64m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_tu( @@ -1066,7 +1066,7 @@ vuint64m8_t test_vrgather_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m8_tu(maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m8_tu(maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_tum( @@ -1075,7 +1075,7 @@ vuint64m8_t test_vrgather_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_f16mf4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16mf4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_tum( @@ -1084,7 +1084,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16mf4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_tum( @@ 
-1093,7 +1093,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_f16mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_tum( @@ -1102,7 +1102,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_tum( @@ -1111,7 +1111,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_f16m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_tum( @@ -1120,7 +1120,7 @@ vfloat16m1_t test_vrgather_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_tum( @@ -1129,7 +1129,7 @@ vfloat16m1_t test_vrgather_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t 
test_vrgather_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_f16m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_tum( @@ -1138,7 +1138,7 @@ vfloat16m2_t test_vrgather_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_tum( @@ -1147,7 +1147,7 @@ vfloat16m2_t test_vrgather_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_f16m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_tum( @@ -1156,7 +1156,7 @@ vfloat16m4_t test_vrgather_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_tum( @@ -1165,7 +1165,7 @@ vfloat16m4_t test_vrgather_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_f16m8_tum(mask, maskedoff, op1, index, 
vl); + return __riscv_vrgather_vv_f16m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_tum( @@ -1174,7 +1174,7 @@ vfloat16m8_t test_vrgather_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tum( @@ -1183,7 +1183,7 @@ vfloat16m8_t test_vrgather_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_f32mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tum( @@ -1192,7 +1192,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_tum( @@ -1201,7 +1201,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_f32m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_tum( @@ -1210,7 +1210,7 @@ 
vfloat32m1_t test_vrgather_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_tum( @@ -1219,7 +1219,7 @@ vfloat32m1_t test_vrgather_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_f32m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_tum( @@ -1228,7 +1228,7 @@ vfloat32m2_t test_vrgather_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_tum( @@ -1237,7 +1237,7 @@ vfloat32m2_t test_vrgather_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_f32m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_tum( @@ -1246,7 +1246,7 @@ vfloat32m4_t test_vrgather_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4_tum(vbool8_t mask, 
vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_tum( @@ -1255,7 +1255,7 @@ vfloat32m4_t test_vrgather_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_f32m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_tum( @@ -1264,7 +1264,7 @@ vfloat32m8_t test_vrgather_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_tum( @@ -1273,7 +1273,7 @@ vfloat32m8_t test_vrgather_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_f64m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_tum( @@ -1282,7 +1282,7 @@ vfloat64m1_t test_vrgather_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m1_tum(mask, maskedoff, op1, index, vl); + return 
__riscv_vrgather_vx_f64m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_tum( @@ -1291,7 +1291,7 @@ vfloat64m1_t test_vrgather_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_f64m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_tum( @@ -1300,7 +1300,7 @@ vfloat64m2_t test_vrgather_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_tum( @@ -1309,7 +1309,7 @@ vfloat64m2_t test_vrgather_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_f64m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_tum( @@ -1318,7 +1318,7 @@ vfloat64m4_t test_vrgather_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_tum( @@ -1327,7 +1327,7 @@ vfloat64m4_t 
test_vrgather_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_f64m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_tum( @@ -1336,7 +1336,7 @@ vfloat64m8_t test_vrgather_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_tum( @@ -1345,7 +1345,7 @@ vfloat64m8_t test_vrgather_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather_vv_i8mf8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8mf8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_tum( @@ -1354,7 +1354,7 @@ vint8mf8_t test_vrgather_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8mf8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_tum( @@ -1363,7 +1363,7 @@ vint8mf8_t test_vrgather_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, 
vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_i8mf4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8mf4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_tum( @@ -1372,7 +1372,7 @@ vint8mf4_t test_vrgather_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8mf4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_tum( @@ -1381,7 +1381,7 @@ vint8mf4_t test_vrgather_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather_vv_i8mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_tum( @@ -1390,7 +1390,7 @@ vint8mf2_t test_vrgather_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_tum( @@ -1399,7 +1399,7 @@ vint8mf2_t test_vrgather_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather_vv_i8m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m1_tum(mask, maskedoff, op1, index, vl); } // 
CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_tum( @@ -1408,7 +1408,7 @@ vint8m1_t test_vrgather_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_tum( @@ -1417,7 +1417,7 @@ vint8m1_t test_vrgather_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_i8m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_tum( @@ -1426,7 +1426,7 @@ vint8m2_t test_vrgather_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_tum( @@ -1435,7 +1435,7 @@ vint8m2_t test_vrgather_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_i8m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_tum( @@ -1444,7 +1444,7 @@ vint8m4_t test_vrgather_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vx_i8m4_tum(vbool2_t 
mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_tum( @@ -1453,7 +1453,7 @@ vint8m4_t test_vrgather_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_i8m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_tum( @@ -1462,7 +1462,7 @@ vint8m8_t test_vrgather_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_tum( @@ -1471,7 +1471,7 @@ vint8m8_t test_vrgather_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_i16mf4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16mf4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_tum( @@ -1480,7 +1480,7 @@ vint16mf4_t test_vrgather_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16mf4_tum(mask, maskedoff, op1, index, 
vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_tum( @@ -1489,7 +1489,7 @@ vint16mf4_t test_vrgather_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_i16mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_tum( @@ -1498,7 +1498,7 @@ vint16mf2_t test_vrgather_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_tum( @@ -1507,7 +1507,7 @@ vint16mf2_t test_vrgather_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_i16m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_tum( @@ -1516,7 +1516,7 @@ vint16m1_t test_vrgather_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_tum( @@ -1525,7 +1525,7 @@ vint16m1_t test_vrgather_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint16m2_t test_vrgather_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_i16m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_tum( @@ -1534,7 +1534,7 @@ vint16m2_t test_vrgather_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_tum( @@ -1543,7 +1543,7 @@ vint16m2_t test_vrgather_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_i16m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_tum( @@ -1552,7 +1552,7 @@ vint16m4_t test_vrgather_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_tum( @@ -1561,7 +1561,7 @@ vint16m4_t test_vrgather_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_i16m8_tum(mask, maskedoff, op1, index, 
vl); + return __riscv_vrgather_vv_i16m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_tum( @@ -1570,7 +1570,7 @@ vint16m8_t test_vrgather_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tum( @@ -1579,7 +1579,7 @@ vint16m8_t test_vrgather_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_i32mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tum( @@ -1588,7 +1588,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_tum( @@ -1597,7 +1597,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_i32m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_tum( @@ -1606,7 +1606,7 @@ vint32m1_t 
test_vrgather_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_tum( @@ -1615,7 +1615,7 @@ vint32m1_t test_vrgather_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_i32m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_tum( @@ -1624,7 +1624,7 @@ vint32m2_t test_vrgather_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_tum( @@ -1633,7 +1633,7 @@ vint32m2_t test_vrgather_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_i32m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_tum( @@ -1642,7 +1642,7 @@ vint32m4_t test_vrgather_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, 
size_t index, size_t vl) { - return vrgather_vx_i32m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_tum( @@ -1651,7 +1651,7 @@ vint32m4_t test_vrgather_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_i32m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_tum( @@ -1660,7 +1660,7 @@ vint32m8_t test_vrgather_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_tum( @@ -1669,7 +1669,7 @@ vint32m8_t test_vrgather_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_i64m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_tum( @@ -1678,7 +1678,7 @@ vint64m1_t test_vrgather_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: 
@test_vrgather_vv_i64m2_tum( @@ -1687,7 +1687,7 @@ vint64m1_t test_vrgather_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_i64m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_tum( @@ -1696,7 +1696,7 @@ vint64m2_t test_vrgather_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_tum( @@ -1705,7 +1705,7 @@ vint64m2_t test_vrgather_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_i64m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_tum( @@ -1714,7 +1714,7 @@ vint64m4_t test_vrgather_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_tum( @@ -1723,7 +1723,7 @@ vint64m4_t test_vrgather_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vrgather_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_i64m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_tum( @@ -1732,7 +1732,7 @@ vint64m8_t test_vrgather_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_tum( @@ -1741,7 +1741,7 @@ vint64m8_t test_vrgather_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather_vv_u8mf8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8mf8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_tum( @@ -1750,7 +1750,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8mf8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_tum( @@ -1759,7 +1759,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_u8mf4_tum(mask, maskedoff, op1, index, vl); + return 
__riscv_vrgather_vv_u8mf4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_tum( @@ -1768,7 +1768,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8mf4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_tum( @@ -1777,7 +1777,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather_vv_u8mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_tum( @@ -1786,7 +1786,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_tum( @@ -1795,7 +1795,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather_vv_u8m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_tum( @@ -1804,7 +1804,7 @@ vuint8m1_t test_vrgather_vv_u8m1_tum(vbool8_t mask, 
vuint8m1_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_tum( @@ -1813,7 +1813,7 @@ vuint8m1_t test_vrgather_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_u8m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_tum( @@ -1822,7 +1822,7 @@ vuint8m2_t test_vrgather_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_tum( @@ -1831,7 +1831,7 @@ vuint8m2_t test_vrgather_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_u8m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_tum( @@ -1840,7 +1840,7 @@ vuint8m4_t test_vrgather_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m4_tum(mask, 
maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_tum( @@ -1849,7 +1849,7 @@ vuint8m4_t test_vrgather_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_u8m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_tum( @@ -1858,7 +1858,7 @@ vuint8m8_t test_vrgather_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_tum( @@ -1867,7 +1867,7 @@ vuint8m8_t test_vrgather_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_u16mf4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16mf4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_tum( @@ -1876,7 +1876,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16mf4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_tum( @@ -1885,7 +1885,7 @@ vuint16mf4_t 
test_vrgather_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_u16mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_tum( @@ -1894,7 +1894,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_tum( @@ -1903,7 +1903,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_u16m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_tum( @@ -1912,7 +1912,7 @@ vuint16m1_t test_vrgather_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_tum( @@ -1921,7 +1921,7 @@ vuint16m1_t test_vrgather_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vv_u16m2_tum(vbool8_t mask, vuint16m2_t 
maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_u16m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_tum( @@ -1930,7 +1930,7 @@ vuint16m2_t test_vrgather_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_tum( @@ -1939,7 +1939,7 @@ vuint16m2_t test_vrgather_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_u16m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_tum( @@ -1948,7 +1948,7 @@ vuint16m4_t test_vrgather_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_tum( @@ -1957,7 +1957,7 @@ vuint16m4_t test_vrgather_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_u16m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m8_tum(mask, maskedoff, op1, 
index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_tum( @@ -1966,7 +1966,7 @@ vuint16m8_t test_vrgather_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tum( @@ -1975,7 +1975,7 @@ vuint16m8_t test_vrgather_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_u32mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tum( @@ -1984,7 +1984,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32mf2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32mf2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_tum( @@ -1993,7 +1993,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_u32m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_tum( @@ -2002,7 +2002,7 @@ vuint32m1_t test_vrgather_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vu // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_tum( @@ -2011,7 +2011,7 @@ vuint32m1_t test_vrgather_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_u32m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_tum( @@ -2020,7 +2020,7 @@ vuint32m2_t test_vrgather_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_tum( @@ -2029,7 +2029,7 @@ vuint32m2_t test_vrgather_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_u32m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_tum( @@ -2038,7 +2038,7 @@ vuint32m4_t test_vrgather_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) { - return 
vrgather_vx_u32m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_tum( @@ -2047,7 +2047,7 @@ vuint32m4_t test_vrgather_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_u32m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_tum( @@ -2056,7 +2056,7 @@ vuint32m8_t test_vrgather_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_tum( @@ -2065,7 +2065,7 @@ vuint32m8_t test_vrgather_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_u64m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_tum( @@ -2074,7 +2074,7 @@ vuint64m1_t test_vrgather_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m1_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m1_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_tum( @@ 
-2083,7 +2083,7 @@ vuint64m1_t test_vrgather_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_u64m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_tum( @@ -2092,7 +2092,7 @@ vuint64m2_t test_vrgather_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m2_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m2_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_tum( @@ -2101,7 +2101,7 @@ vuint64m2_t test_vrgather_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_u64m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_tum( @@ -2110,7 +2110,7 @@ vuint64m4_t test_vrgather_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m4_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m4_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_tum( @@ -2119,7 +2119,7 @@ vuint64m4_t test_vrgather_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vv_u64m8_tum(vbool8_t 
mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_u64m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_tum( @@ -2128,7 +2128,7 @@ vuint64m8_t test_vrgather_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m8_tum(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m8_tum(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_tumu( @@ -2137,7 +2137,7 @@ vuint64m8_t test_vrgather_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_f16mf4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16mf4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_tumu( @@ -2146,7 +2146,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16mf4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_tumu( @@ -2155,7 +2155,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_f16mf2_tumu(mask, maskedoff, op1, index, vl); + 
return __riscv_vrgather_vv_f16mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_tumu( @@ -2164,7 +2164,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_tumu( @@ -2173,7 +2173,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_f16m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_tumu( @@ -2182,7 +2182,7 @@ vfloat16m1_t test_vrgather_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_tumu( @@ -2191,7 +2191,7 @@ vfloat16m1_t test_vrgather_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_f16m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_tumu( @@ -2200,7 +2200,7 @@ 
vfloat16m2_t test_vrgather_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_tumu( @@ -2209,7 +2209,7 @@ vfloat16m2_t test_vrgather_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_f16m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_tumu( @@ -2218,7 +2218,7 @@ vfloat16m4_t test_vrgather_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_tumu( @@ -2227,7 +2227,7 @@ vfloat16m4_t test_vrgather_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_f16m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_tumu( @@ -2236,7 +2236,7 @@ vfloat16m8_t test_vrgather_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vx_f16m8_tumu(vbool2_t 
mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_tumu( @@ -2245,7 +2245,7 @@ vfloat16m8_t test_vrgather_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_f32mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_tumu( @@ -2254,7 +2254,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_tumu( @@ -2263,7 +2263,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_f32m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_tumu( @@ -2272,7 +2272,7 @@ vfloat32m1_t test_vrgather_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m1_tumu(mask, maskedoff, op1, index, vl); + 
return __riscv_vrgather_vx_f32m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_tumu( @@ -2281,7 +2281,7 @@ vfloat32m1_t test_vrgather_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_f32m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_tumu( @@ -2290,7 +2290,7 @@ vfloat32m2_t test_vrgather_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_tumu( @@ -2299,7 +2299,7 @@ vfloat32m2_t test_vrgather_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_f32m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_tumu( @@ -2308,7 +2308,7 @@ vfloat32m4_t test_vrgather_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_tumu( @@ -2317,7 +2317,7 @@ vfloat32m4_t 
test_vrgather_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_f32m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_tumu( @@ -2326,7 +2326,7 @@ vfloat32m8_t test_vrgather_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_tumu( @@ -2335,7 +2335,7 @@ vfloat32m8_t test_vrgather_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_f64m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_tumu( @@ -2344,7 +2344,7 @@ vfloat64m1_t test_vrgather_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_tumu( @@ -2353,7 +2353,7 @@ vfloat64m1_t test_vrgather_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2_tumu(vbool32_t mask, 
vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_f64m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_tumu( @@ -2362,7 +2362,7 @@ vfloat64m2_t test_vrgather_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_tumu( @@ -2371,7 +2371,7 @@ vfloat64m2_t test_vrgather_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_f64m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_tumu( @@ -2380,7 +2380,7 @@ vfloat64m4_t test_vrgather_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_tumu( @@ -2389,7 +2389,7 @@ vfloat64m4_t test_vrgather_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_f64m8_tumu(mask, maskedoff, op1, index, vl); + return 
__riscv_vrgather_vv_f64m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_tumu( @@ -2398,7 +2398,7 @@ vfloat64m8_t test_vrgather_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_tumu( @@ -2407,7 +2407,7 @@ vfloat64m8_t test_vrgather_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather_vv_i8mf8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8mf8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_tumu( @@ -2416,7 +2416,7 @@ vint8mf8_t test_vrgather_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8mf8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_tumu( @@ -2425,7 +2425,7 @@ vint8mf8_t test_vrgather_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_i8mf4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8mf4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_tumu( @@ -2434,7 +2434,7 @@ vint8mf4_t 
test_vrgather_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8mf4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_tumu( @@ -2443,7 +2443,7 @@ vint8mf4_t test_vrgather_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather_vv_i8mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_tumu( @@ -2452,7 +2452,7 @@ vint8mf2_t test_vrgather_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_tumu( @@ -2461,7 +2461,7 @@ vint8mf2_t test_vrgather_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather_vv_i8m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_tumu( @@ -2470,7 +2470,7 @@ vint8m1_t test_vrgather_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t 
op1, size_t index, size_t vl) { - return vrgather_vx_i8m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_tumu( @@ -2479,7 +2479,7 @@ vint8m1_t test_vrgather_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_i8m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_tumu( @@ -2488,7 +2488,7 @@ vint8m2_t test_vrgather_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_tumu( @@ -2497,7 +2497,7 @@ vint8m2_t test_vrgather_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_i8m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_tumu( @@ -2506,7 +2506,7 @@ vint8m4_t test_vrgather_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: 
@test_vrgather_vv_i8m8_tumu( @@ -2515,7 +2515,7 @@ vint8m4_t test_vrgather_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_i8m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_tumu( @@ -2524,7 +2524,7 @@ vint8m8_t test_vrgather_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_tumu( @@ -2533,7 +2533,7 @@ vint8m8_t test_vrgather_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_i16mf4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16mf4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_tumu( @@ -2542,7 +2542,7 @@ vint16mf4_t test_vrgather_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16mf4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_tumu( @@ -2551,7 +2551,7 @@ vint16mf4_t test_vrgather_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vrgather_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_i16mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_tumu( @@ -2560,7 +2560,7 @@ vint16mf2_t test_vrgather_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_tumu( @@ -2569,7 +2569,7 @@ vint16mf2_t test_vrgather_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_i16m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_tumu( @@ -2578,7 +2578,7 @@ vint16m1_t test_vrgather_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_tumu( @@ -2587,7 +2587,7 @@ vint16m1_t test_vrgather_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_i16m2_tumu(mask, maskedoff, op1, 
index, vl); + return __riscv_vrgather_vv_i16m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_tumu( @@ -2596,7 +2596,7 @@ vint16m2_t test_vrgather_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_tumu( @@ -2605,7 +2605,7 @@ vint16m2_t test_vrgather_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_i16m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_tumu( @@ -2614,7 +2614,7 @@ vint16m4_t test_vrgather_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_tumu( @@ -2623,7 +2623,7 @@ vint16m4_t test_vrgather_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_i16m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_tumu( @@ -2632,7 +2632,7 @@ vint16m8_t 
test_vrgather_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_tumu( @@ -2641,7 +2641,7 @@ vint16m8_t test_vrgather_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_i32mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_tumu( @@ -2650,7 +2650,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_tumu( @@ -2659,7 +2659,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_i32m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_tumu( @@ -2668,7 +2668,7 @@ vint32m1_t test_vrgather_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1_tumu(vbool32_t mask, 
vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_tumu( @@ -2677,7 +2677,7 @@ vint32m1_t test_vrgather_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_i32m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_tumu( @@ -2686,7 +2686,7 @@ vint32m2_t test_vrgather_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_tumu( @@ -2695,7 +2695,7 @@ vint32m2_t test_vrgather_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_i32m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_tumu( @@ -2704,7 +2704,7 @@ vint32m4_t test_vrgather_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m4_tumu(mask, 
maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_tumu( @@ -2713,7 +2713,7 @@ vint32m4_t test_vrgather_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_i32m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_tumu( @@ -2722,7 +2722,7 @@ vint32m8_t test_vrgather_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_tumu( @@ -2731,7 +2731,7 @@ vint32m8_t test_vrgather_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_i64m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_tumu( @@ -2740,7 +2740,7 @@ vint64m1_t test_vrgather_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_tumu( @@ -2749,7 +2749,7 @@ vint64m1_t test_vrgather_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vin 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_i64m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_tumu( @@ -2758,7 +2758,7 @@ vint64m2_t test_vrgather_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_tumu( @@ -2767,7 +2767,7 @@ vint64m2_t test_vrgather_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_i64m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_tumu( @@ -2776,7 +2776,7 @@ vint64m4_t test_vrgather_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_tumu( @@ -2785,7 +2785,7 @@ vint64m4_t test_vrgather_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return 
vrgather_vv_i64m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_tumu( @@ -2794,7 +2794,7 @@ vint64m8_t test_vrgather_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_tumu( @@ -2803,7 +2803,7 @@ vint64m8_t test_vrgather_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather_vv_u8mf8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8mf8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_tumu( @@ -2812,7 +2812,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8mf8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_tumu( @@ -2821,7 +2821,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_u8mf4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8mf4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: 
@test_vrgather_vx_u8mf4_tumu( @@ -2830,7 +2830,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8mf4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_tumu( @@ -2839,7 +2839,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather_vv_u8mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_tumu( @@ -2848,7 +2848,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_tumu( @@ -2857,7 +2857,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather_vv_u8m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_tumu( @@ -2866,7 +2866,7 @@ vuint8m1_t test_vrgather_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vrgather_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_tumu( @@ -2875,7 +2875,7 @@ vuint8m1_t test_vrgather_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_u8m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_tumu( @@ -2884,7 +2884,7 @@ vuint8m2_t test_vrgather_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_tumu( @@ -2893,7 +2893,7 @@ vuint8m2_t test_vrgather_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_u8m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_tumu( @@ -2902,7 +2902,7 @@ vuint8m4_t test_vrgather_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m4_tumu(mask, maskedoff, op1, index, vl); + return 
__riscv_vrgather_vx_u8m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_tumu( @@ -2911,7 +2911,7 @@ vuint8m4_t test_vrgather_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_u8m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_tumu( @@ -2920,7 +2920,7 @@ vuint8m8_t test_vrgather_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_tumu( @@ -2929,7 +2929,7 @@ vuint8m8_t test_vrgather_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_u16mf4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16mf4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_tumu( @@ -2938,7 +2938,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16mf4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_tumu( @@ -2947,7 +2947,7 @@ vuint16mf4_t 
test_vrgather_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_u16mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_tumu( @@ -2956,7 +2956,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_tumu( @@ -2965,7 +2965,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_u16m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_tumu( @@ -2974,7 +2974,7 @@ vuint16m1_t test_vrgather_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_tumu( @@ -2983,7 +2983,7 @@ vuint16m1_t test_vrgather_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vv_u16m2_tumu(vbool8_t 
mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_u16m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_tumu( @@ -2992,7 +2992,7 @@ vuint16m2_t test_vrgather_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_tumu( @@ -3001,7 +3001,7 @@ vuint16m2_t test_vrgather_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_u16m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_tumu( @@ -3010,7 +3010,7 @@ vuint16m4_t test_vrgather_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_tumu( @@ -3019,7 +3019,7 @@ vuint16m4_t test_vrgather_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_u16m8_tumu(mask, maskedoff, op1, index, vl); + return 
__riscv_vrgather_vv_u16m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_tumu( @@ -3028,7 +3028,7 @@ vuint16m8_t test_vrgather_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_tumu( @@ -3037,7 +3037,7 @@ vuint16m8_t test_vrgather_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_u32mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_tumu( @@ -3046,7 +3046,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32mf2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32mf2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_tumu( @@ -3055,7 +3055,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_u32m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_tumu( @@ -3064,7 +3064,7 @@ vuint32m1_t 
test_vrgather_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_tumu( @@ -3073,7 +3073,7 @@ vuint32m1_t test_vrgather_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_u32m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_tumu( @@ -3082,7 +3082,7 @@ vuint32m2_t test_vrgather_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_tumu( @@ -3091,7 +3091,7 @@ vuint32m2_t test_vrgather_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_u32m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_tumu( @@ -3100,7 +3100,7 @@ vuint32m4_t test_vrgather_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vx_u32m4_tumu(vbool8_t mask, 
vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_tumu( @@ -3109,7 +3109,7 @@ vuint32m4_t test_vrgather_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_u32m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_tumu( @@ -3118,7 +3118,7 @@ vuint32m8_t test_vrgather_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_tumu( @@ -3127,7 +3127,7 @@ vuint32m8_t test_vrgather_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_u64m1_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_tumu( @@ -3136,7 +3136,7 @@ vuint64m1_t test_vrgather_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m1_tumu(mask, maskedoff, op1, index, vl); + return 
__riscv_vrgather_vx_u64m1_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_tumu( @@ -3145,7 +3145,7 @@ vuint64m1_t test_vrgather_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_u64m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_tumu( @@ -3154,7 +3154,7 @@ vuint64m2_t test_vrgather_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m2_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m2_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_tumu( @@ -3163,7 +3163,7 @@ vuint64m2_t test_vrgather_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_u64m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_tumu( @@ -3172,7 +3172,7 @@ vuint64m4_t test_vrgather_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m4_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m4_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_tumu( @@ -3181,7 +3181,7 @@ vuint64m4_t 
test_vrgather_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_u64m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_tumu( @@ -3190,7 +3190,7 @@ vuint64m8_t test_vrgather_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m8_tumu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m8_tumu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_mu( @@ -3199,7 +3199,7 @@ vuint64m8_t test_vrgather_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_f16mf4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16mf4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_mu( @@ -3208,7 +3208,7 @@ vfloat16mf4_t test_vrgather_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgather_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16mf4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_mu( @@ -3217,7 +3217,7 @@ vfloat16mf4_t test_vrgather_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vv_f16mf2_mu(vbool32_t mask, 
vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_f16mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_mu( @@ -3226,7 +3226,7 @@ vfloat16mf2_t test_vrgather_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgather_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_mu( @@ -3235,7 +3235,7 @@ vfloat16mf2_t test_vrgather_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_f16m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_mu( @@ -3244,7 +3244,7 @@ vfloat16m1_t test_vrgather_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgather_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_mu( @@ -3253,7 +3253,7 @@ vfloat16m1_t test_vrgather_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_f16m2_mu(mask, maskedoff, op1, index, vl); + return 
__riscv_vrgather_vv_f16m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_mu( @@ -3262,7 +3262,7 @@ vfloat16m2_t test_vrgather_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgather_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_mu( @@ -3271,7 +3271,7 @@ vfloat16m2_t test_vrgather_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_f16m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_mu( @@ -3280,7 +3280,7 @@ vfloat16m4_t test_vrgather_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgather_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_mu( @@ -3289,7 +3289,7 @@ vfloat16m4_t test_vrgather_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_f16m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f16m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_mu( @@ -3298,7 +3298,7 @@ vfloat16m8_t test_vrgather_vv_f16m8_mu(vbool2_t mask, 
vfloat16m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgather_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f16m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_mu( @@ -3307,7 +3307,7 @@ vfloat16m8_t test_vrgather_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_f32mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_mu( @@ -3316,7 +3316,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_mu( @@ -3325,7 +3325,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_f32m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_mu( @@ -3334,7 +3334,7 @@ vfloat32m1_t test_vrgather_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, 
size_t vl) { - return vrgather_vx_f32m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_mu( @@ -3343,7 +3343,7 @@ vfloat32m1_t test_vrgather_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_f32m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_mu( @@ -3352,7 +3352,7 @@ vfloat32m2_t test_vrgather_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_mu( @@ -3361,7 +3361,7 @@ vfloat32m2_t test_vrgather_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_f32m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_mu( @@ -3370,7 +3370,7 @@ vfloat32m4_t test_vrgather_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: 
@test_vrgather_vv_f32m8_mu( @@ -3379,7 +3379,7 @@ vfloat32m4_t test_vrgather_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_f32m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f32m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_mu( @@ -3388,7 +3388,7 @@ vfloat32m8_t test_vrgather_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f32m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f32m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_mu( @@ -3397,7 +3397,7 @@ vfloat32m8_t test_vrgather_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_f64m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_mu( @@ -3406,7 +3406,7 @@ vfloat64m1_t test_vrgather_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_mu( @@ -3415,7 +3415,7 @@ vfloat64m1_t test_vrgather_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vrgather_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_f64m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_mu( @@ -3424,7 +3424,7 @@ vfloat64m2_t test_vrgather_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_mu( @@ -3433,7 +3433,7 @@ vfloat64m2_t test_vrgather_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_f64m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_f64m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_mu( @@ -3442,7 +3442,7 @@ vfloat64m4_t test_vrgather_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_mu( @@ -3451,7 +3451,7 @@ vfloat64m4_t test_vrgather_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_f64m8_mu(mask, maskedoff, op1, index, vl); + return 
__riscv_vrgather_vv_f64m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_mu( @@ -3460,7 +3460,7 @@ vfloat64m8_t test_vrgather_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f64m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_f64m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_mu( @@ -3469,7 +3469,7 @@ vfloat64m8_t test_vrgather_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather_vv_i8mf8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8mf8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_mu( @@ -3478,7 +3478,7 @@ vint8mf8_t test_vrgather_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8mf8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_mu( @@ -3487,7 +3487,7 @@ vint8mf8_t test_vrgather_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_i8mf4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8mf4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_mu( @@ -3496,7 +3496,7 @@ vint8mf4_t test_vrgather_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t 
maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8mf4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_mu( @@ -3505,7 +3505,7 @@ vint8mf4_t test_vrgather_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather_vv_i8mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_mu( @@ -3514,7 +3514,7 @@ vint8mf2_t test_vrgather_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i8mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_mu( @@ -3523,7 +3523,7 @@ vint8mf2_t test_vrgather_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather_vv_i8m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_mu( @@ -3532,7 +3532,7 @@ vint8m1_t test_vrgather_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m1_mu(mask, maskedoff, op1, index, 
vl); + return __riscv_vrgather_vx_i8m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_mu( @@ -3541,7 +3541,7 @@ vint8m1_t test_vrgather_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_i8m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_mu( @@ -3550,7 +3550,7 @@ vint8m2_t test_vrgather_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_mu( @@ -3559,7 +3559,7 @@ vint8m2_t test_vrgather_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_i8m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_mu( @@ -3568,7 +3568,7 @@ vint8m4_t test_vrgather_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_mu( @@ -3577,7 +3577,7 @@ vint8m4_t test_vrgather_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_i8m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i8m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_mu( @@ -3586,7 +3586,7 @@ vint8m8_t test_vrgather_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i8m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i8m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_mu( @@ -3595,7 +3595,7 @@ vint8m8_t test_vrgather_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_i16mf4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16mf4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_mu( @@ -3604,7 +3604,7 @@ vint16mf4_t test_vrgather_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16mf4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_mu( @@ -3613,7 +3613,7 @@ vint16mf4_t test_vrgather_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_i16mf2_mu(mask, maskedoff, op1, 
index, vl); + return __riscv_vrgather_vv_i16mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_mu( @@ -3622,7 +3622,7 @@ vint16mf2_t test_vrgather_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i16mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_mu( @@ -3631,7 +3631,7 @@ vint16mf2_t test_vrgather_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_i16m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_mu( @@ -3640,7 +3640,7 @@ vint16m1_t test_vrgather_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_mu( @@ -3649,7 +3649,7 @@ vint16m1_t test_vrgather_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_i16m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_mu( @@ -3658,7 +3658,7 @@ vint16m2_t test_vrgather_vv_i16m2_mu(vbool8_t 
mask, vint16m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_mu( @@ -3667,7 +3667,7 @@ vint16m2_t test_vrgather_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_i16m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_mu( @@ -3676,7 +3676,7 @@ vint16m4_t test_vrgather_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i16m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_mu( @@ -3685,7 +3685,7 @@ vint16m4_t test_vrgather_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_i16m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i16m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_mu( @@ -3694,7 +3694,7 @@ vint16m8_t test_vrgather_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { - return 
vrgather_vx_i16m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i16m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_mu( @@ -3703,7 +3703,7 @@ vint16m8_t test_vrgather_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_i32mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_mu( @@ -3712,7 +3712,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_mu( @@ -3721,7 +3721,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_i32m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_mu( @@ -3730,7 +3730,7 @@ vint32m1_t test_vrgather_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_mu( @@ -3739,7 +3739,7 
@@ vint32m1_t test_vrgather_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather_vv_i32m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_mu( @@ -3748,7 +3748,7 @@ vint32m2_t test_vrgather_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_mu( @@ -3757,7 +3757,7 @@ vint32m2_t test_vrgather_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_i32m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_mu( @@ -3766,7 +3766,7 @@ vint32m4_t test_vrgather_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_mu( @@ -3775,7 +3775,7 @@ vint32m4_t test_vrgather_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, 
vuint32m8_t index, size_t vl) { - return vrgather_vv_i32m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i32m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_mu( @@ -3784,7 +3784,7 @@ vint32m8_t test_vrgather_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i32m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i32m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_mu( @@ -3793,7 +3793,7 @@ vint32m8_t test_vrgather_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_i64m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_mu( @@ -3802,7 +3802,7 @@ vint64m1_t test_vrgather_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_mu( @@ -3811,7 +3811,7 @@ vint64m1_t test_vrgather_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_i64m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: 
@test_vrgather_vx_i64m2_mu( @@ -3820,7 +3820,7 @@ vint64m2_t test_vrgather_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_mu( @@ -3829,7 +3829,7 @@ vint64m2_t test_vrgather_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_i64m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_mu( @@ -3838,7 +3838,7 @@ vint64m4_t test_vrgather_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_mu( @@ -3847,7 +3847,7 @@ vint64m4_t test_vrgather_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_i64m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_i64m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_mu( @@ -3856,7 +3856,7 @@ vint64m8_t test_vrgather_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8_mu(vbool8_t 
mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_i64m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_i64m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_mu( @@ -3865,7 +3865,7 @@ vint64m8_t test_vrgather_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather_vv_u8mf8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8mf8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_mu( @@ -3874,7 +3874,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8mf8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_mu( @@ -3883,7 +3883,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather_vv_u8mf4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8mf4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_mu( @@ -3892,7 +3892,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8mf4_mu(mask, maskedoff, op1, index, 
vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_mu( @@ -3901,7 +3901,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather_vv_u8mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_mu( @@ -3910,7 +3910,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u8mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_mu( @@ -3919,7 +3919,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather_vv_u8m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_mu( @@ -3928,7 +3928,7 @@ vuint8m1_t test_vrgather_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_mu( @@ -3937,7 +3937,7 @@ vuint8m1_t test_vrgather_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vrgather_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather_vv_u8m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_mu( @@ -3946,7 +3946,7 @@ vuint8m2_t test_vrgather_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_mu( @@ -3955,7 +3955,7 @@ vuint8m2_t test_vrgather_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather_vv_u8m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_mu( @@ -3964,7 +3964,7 @@ vuint8m4_t test_vrgather_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_mu( @@ -3973,7 +3973,7 @@ vuint8m4_t test_vrgather_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather_vv_u8m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u8m8_mu(mask, maskedoff, op1, 
index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_mu( @@ -3982,7 +3982,7 @@ vuint8m8_t test_vrgather_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u8m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u8m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_mu( @@ -3991,7 +3991,7 @@ vuint8m8_t test_vrgather_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_u16mf4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16mf4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_mu( @@ -4000,7 +4000,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16mf4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_mu( @@ -4009,7 +4009,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_u16mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_mu( @@ -4018,7 +4018,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint16mf2_t test_vrgather_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_mu( @@ -4027,7 +4027,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_u16m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_mu( @@ -4036,7 +4036,7 @@ vuint16m1_t test_vrgather_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_mu( @@ -4045,7 +4045,7 @@ vuint16m1_t test_vrgather_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_u16m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_mu( @@ -4054,7 +4054,7 @@ vuint16m2_t test_vrgather_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m2_mu(mask, maskedoff, op1, index, 
vl); + return __riscv_vrgather_vx_u16m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_mu( @@ -4063,7 +4063,7 @@ vuint16m2_t test_vrgather_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_u16m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_mu( @@ -4072,7 +4072,7 @@ vuint16m4_t test_vrgather_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_mu( @@ -4081,7 +4081,7 @@ vuint16m4_t test_vrgather_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_u16m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u16m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_mu( @@ -4090,7 +4090,7 @@ vuint16m8_t test_vrgather_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u16m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u16m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_mu( @@ -4099,7 +4099,7 @@ vuint16m8_t test_vrgather_vx_u16m8_mu(vbool2_t mask, 
vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather_vv_u32mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_mu( @@ -4108,7 +4108,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32mf2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32mf2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_mu( @@ -4117,7 +4117,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather_vv_u32m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_mu( @@ -4126,7 +4126,7 @@ vuint32m1_t test_vrgather_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_mu( @@ -4135,7 +4135,7 @@ vuint32m1_t test_vrgather_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) { 
- return vrgather_vv_u32m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_mu( @@ -4144,7 +4144,7 @@ vuint32m2_t test_vrgather_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_mu( @@ -4153,7 +4153,7 @@ vuint32m2_t test_vrgather_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather_vv_u32m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_mu( @@ -4162,7 +4162,7 @@ vuint32m4_t test_vrgather_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_mu( @@ -4171,7 +4171,7 @@ vuint32m4_t test_vrgather_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather_vv_u32m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u32m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_mu( @@ -4180,7 
+4180,7 @@ vuint32m8_t test_vrgather_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u32m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u32m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_mu( @@ -4189,7 +4189,7 @@ vuint32m8_t test_vrgather_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather_vv_u64m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_mu( @@ -4198,7 +4198,7 @@ vuint64m1_t test_vrgather_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m1_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m1_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_mu( @@ -4207,7 +4207,7 @@ vuint64m1_t test_vrgather_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather_vv_u64m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_mu( @@ -4216,7 +4216,7 @@ vuint64m2_t test_vrgather_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vx_u64m2_mu(vbool32_t mask, vuint64m2_t 
maskedoff, vuint64m2_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m2_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m2_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_mu( @@ -4225,7 +4225,7 @@ vuint64m2_t test_vrgather_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather_vv_u64m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_mu( @@ -4234,7 +4234,7 @@ vuint64m4_t test_vrgather_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m4_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m4_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_mu( @@ -4243,7 +4243,7 @@ vuint64m4_t test_vrgather_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather_vv_u64m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vv_u64m8_mu(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_mu( @@ -4252,6 +4252,6 @@ vuint64m8_t test_vrgather_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) { - return vrgather_vx_u64m8_mu(mask, maskedoff, op1, index, vl); + return __riscv_vrgather_vx_u64m8_mu(mask, maskedoff, op1, index, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgatherei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgatherei16.c index 1f3a2a71b325..8232eba6e11d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgatherei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgatherei16.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgatherei16_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgatherei16_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f16m2_tu(maskedoff, op1, op2, vl); + return 
__riscv_vrgatherei16_vv_f16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgatherei16_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgatherei16_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_f16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vrgatherei16_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t 
test_vrgatherei16_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_tu( @@ -148,7 +148,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_tu( @@ -157,7 +157,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgatherei16_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_tu( @@ -166,7 +166,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t 
vl) { - return vrgatherei16_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_tu( @@ -175,7 +175,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_tu( @@ -184,7 +184,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_tu( @@ -193,7 +193,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_tu( @@ -202,7 +202,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_tu( @@ -211,7 +211,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_tu( @@ -220,7 +220,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgatherei16_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_tu( @@ -229,7 +229,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_tu( @@ -238,7 +238,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_tu( @@ -247,7 +247,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m8_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tu( @@ -256,7 +256,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_tu( @@ -265,7 +265,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_tu( @@ -274,7 +274,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_tu( @@ -283,7 +283,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgatherei16_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_tu( @@ -292,7 +292,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, 
vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_tu( @@ -301,7 +301,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_tu( @@ -310,7 +310,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_tu( @@ -319,7 +319,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_tu( @@ -328,7 +328,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_tu( @@ -337,7 +337,7 @@ vint64m8_t 
test_vrgatherei16_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_tu( @@ -346,7 +346,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_tu( @@ -355,7 +355,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgatherei16_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_tu( @@ -364,7 +364,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_tu( @@ -373,7 +373,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u8m2_tu(maskedoff, op1, op2, vl); + 
return __riscv_vrgatherei16_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_tu( @@ -382,7 +382,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_tu( @@ -391,7 +391,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_tu( @@ -400,7 +400,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_tu( @@ -409,7 +409,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_tu( @@ -418,7 +418,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m2_t test_vrgatherei16_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_tu( @@ -427,7 +427,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_tu( @@ -436,7 +436,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgatherei16_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tu( @@ -445,7 +445,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_tu( @@ -454,7 +454,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_u32m2_tu( @@ -463,7 +463,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgatherei16_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_tu( @@ -472,7 +472,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_tu( @@ -481,7 +481,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_tu( @@ -490,7 +490,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_tu( @@ -499,7 +499,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, 
size_t vl) { - return vrgatherei16_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_tu( @@ -508,7 +508,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_tu( @@ -517,7 +517,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_tum( @@ -526,7 +526,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_tum( @@ -535,7 +535,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_f16m1_tum( @@ -544,7 +544,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgatherei16_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_tum( @@ -553,7 +553,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgatherei16_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_tum( @@ -562,7 +562,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgatherei16_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_tum( @@ -571,7 +571,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgatherei16_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tum( @@ -580,7 +580,7 @@ vfloat16m8_t test_vrgatherei16_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedof 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_tum( @@ -589,7 +589,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t mask // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_tum( @@ -598,7 +598,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_tum( @@ -607,7 +607,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_tum( @@ -616,7 +616,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, 
vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_tum( @@ -625,7 +625,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_tum( @@ -634,7 +634,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_tum( @@ -643,7 +643,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_tum( @@ -652,7 +652,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vrgatherei16_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_tum( @@ -661,7 +661,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_tum( @@ -670,7 +670,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgatherei16_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_tum( @@ -679,7 +679,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_tum( @@ -688,7 +688,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_tum( @@ -697,7 +697,7 @@ vint8m1_t 
test_vrgatherei16_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_tum( @@ -706,7 +706,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_tum( @@ -715,7 +715,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_tum( @@ -724,7 +724,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_tum( @@ -733,7 +733,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vrgatherei16_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_tum( @@ -742,7 +742,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_tum( @@ -751,7 +751,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_tum( @@ -760,7 +760,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tum( @@ -769,7 +769,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return 
vrgatherei16_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_tum( @@ -778,7 +778,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_tum( @@ -787,7 +787,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_tum( @@ -796,7 +796,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgatherei16_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_tum( @@ -805,7 +805,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_i64m1_tum( @@ -814,7 +814,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_tum( @@ -823,7 +823,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_tum( @@ -832,7 +832,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_tum( @@ -841,7 +841,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_tum( @@ -850,7 +850,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, v // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_tum( @@ -859,7 +859,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_tum( @@ -868,7 +868,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgatherei16_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_tum( @@ -877,7 +877,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_tum( @@ -886,7 +886,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return 
vrgatherei16_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_tum( @@ -895,7 +895,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_tum( @@ -904,7 +904,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_tum( @@ -913,7 +913,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_tum( @@ -922,7 +922,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_tum( @@ -931,7 +931,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_tum( @@ -940,7 +940,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_tum( @@ -949,7 +949,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgatherei16_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tum( @@ -958,7 +958,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_tum( @@ -967,7 +967,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_tum(vbool64_t mask, 
vuint32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_tum( @@ -976,7 +976,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgatherei16_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_tum( @@ -985,7 +985,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_tum( @@ -994,7 +994,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_tum( @@ -1003,7 +1003,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, 
vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_tum( @@ -1012,7 +1012,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_tum( @@ -1021,7 +1021,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_tum( @@ -1030,7 +1030,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_tumu( @@ -1039,7 +1039,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vrgatherei16_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_tumu( @@ -1048,7 +1048,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_tumu( @@ -1057,7 +1057,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgatherei16_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_tumu( @@ -1066,7 +1066,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgatherei16_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_tumu( @@ -1075,7 +1075,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgatherei16_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_f16m8_tumu( @@ -1084,7 +1084,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgatherei16_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_tumu( @@ -1093,7 +1093,7 @@ vfloat16m8_t test_vrgatherei16_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_tumu( @@ -1102,7 +1102,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t mas // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_tumu( @@ -1111,7 +1111,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_tumu( @@ -1120,7 +1120,7 @@ vfloat32m2_t 
test_vrgatherei16_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_tumu( @@ -1129,7 +1129,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_tumu( @@ -1138,7 +1138,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_tumu( @@ -1147,7 +1147,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_tumu( @@ -1156,7 +1156,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m4_t test_vrgatherei16_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_tumu( @@ -1165,7 +1165,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_tumu( @@ -1174,7 +1174,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_tumu( @@ -1183,7 +1183,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgatherei16_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_tumu( @@ -1192,7 +1192,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, 
size_t vl) { - return vrgatherei16_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_tumu( @@ -1201,7 +1201,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_tumu( @@ -1210,7 +1210,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_tumu( @@ -1219,7 +1219,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_tumu( @@ -1228,7 +1228,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16mf4_tumu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_tumu( @@ -1237,7 +1237,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_tumu( @@ -1246,7 +1246,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgatherei16_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_tumu( @@ -1255,7 +1255,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_tumu( @@ -1264,7 +1264,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_tumu( @@ -1273,7 +1273,7 @@ vint16m4_t 
test_vrgatherei16_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_tumu( @@ -1282,7 +1282,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_tumu( @@ -1291,7 +1291,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_tumu( @@ -1300,7 +1300,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_tumu( @@ -1309,7 +1309,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vrgatherei16_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_tumu( @@ -1318,7 +1318,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_tumu( @@ -1327,7 +1327,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_tumu( @@ -1336,7 +1336,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_tumu( @@ -1345,7 +1345,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return 
vrgatherei16_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_tumu( @@ -1354,7 +1354,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_tumu( @@ -1363,7 +1363,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_tumu( @@ -1372,7 +1372,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_tumu( @@ -1381,7 +1381,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgatherei16_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf2_tumu(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_tumu( @@ -1390,7 +1390,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_tumu( @@ -1399,7 +1399,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_tumu( @@ -1408,7 +1408,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_tumu( @@ -1417,7 +1417,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_tumu( @@ -1426,7 +1426,7 @@ vuint16mf4_t 
test_vrgatherei16_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_tumu( @@ -1435,7 +1435,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_tumu( @@ -1444,7 +1444,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_tumu( @@ -1453,7 +1453,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_tumu( @@ -1462,7 +1462,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m8_t test_vrgatherei16_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_tumu( @@ -1471,7 +1471,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_tumu( @@ -1480,7 +1480,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_tumu( @@ -1489,7 +1489,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgatherei16_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_tumu( @@ -1498,7 +1498,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t 
op2, size_t vl) { - return vrgatherei16_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_tumu( @@ -1507,7 +1507,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_tumu( @@ -1516,7 +1516,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_tumu( @@ -1525,7 +1525,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_tumu( @@ -1534,7 +1534,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vrgatherei16_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_tumu( @@ -1543,7 +1543,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_mu( @@ -1552,7 +1552,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vrgatherei16_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_mu( @@ -1561,7 +1561,7 @@ vfloat16mf4_t test_vrgatherei16_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vrgatherei16_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_mu( @@ -1570,7 +1570,7 @@ vfloat16mf2_t test_vrgatherei16_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vrgatherei16_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_f16m2_mu( @@ -1579,7 +1579,7 @@ vfloat16m1_t test_vrgatherei16_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vrgatherei16_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_mu( @@ -1588,7 +1588,7 @@ vfloat16m2_t test_vrgatherei16_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vrgatherei16_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_mu( @@ -1597,7 +1597,7 @@ vfloat16m4_t test_vrgatherei16_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vrgatherei16_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_mu( @@ -1606,7 +1606,7 @@ vfloat16m8_t test_vrgatherei16_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_mu( @@ -1615,7 +1615,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maske 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_mu( @@ -1624,7 +1624,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_mu( @@ -1633,7 +1633,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_mu( @@ -1642,7 +1642,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_mu( @@ -1651,7 +1651,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, 
vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_mu( @@ -1660,7 +1660,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_mu( @@ -1669,7 +1669,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_mu( @@ -1678,7 +1678,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_mu( @@ -1687,7 +1687,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf8_mu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_mu( @@ -1696,7 +1696,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgatherei16_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_mu( @@ -1705,7 +1705,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_mu( @@ -1714,7 +1714,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_mu( @@ -1723,7 +1723,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_mu( @@ -1732,7 +1732,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, 
vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_mu( @@ -1741,7 +1741,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_mu( @@ -1750,7 +1750,7 @@ vint16mf4_t test_vrgatherei16_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_mu( @@ -1759,7 +1759,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgatherei16_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_mu( @@ -1768,7 +1768,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, 
size_t vl) { - return vrgatherei16_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_mu( @@ -1777,7 +1777,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_mu( @@ -1786,7 +1786,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_mu( @@ -1795,7 +1795,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_mu( @@ -1804,7 +1804,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_mu( @@ -1813,7 +1813,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_mu( @@ -1822,7 +1822,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgatherei16_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_mu( @@ -1831,7 +1831,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_mu( @@ -1840,7 +1840,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_mu( @@ -1849,7 +1849,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, v // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_mu( @@ -1858,7 +1858,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_mu( @@ -1867,7 +1867,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_mu( @@ -1876,7 +1876,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_mu( @@ -1885,7 +1885,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) 
{ - return vrgatherei16_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_mu( @@ -1894,7 +1894,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgatherei16_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_mu( @@ -1903,7 +1903,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_mu( @@ -1912,7 +1912,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_mu( @@ -1921,7 +1921,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_u16mf4_mu( @@ -1930,7 +1930,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_mu( @@ -1939,7 +1939,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_mu( @@ -1948,7 +1948,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_mu( @@ -1957,7 +1957,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_mu( @@ -1966,7 +1966,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_mu( @@ -1975,7 +1975,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgatherei16_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_mu( @@ -1984,7 +1984,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_mu( @@ -1993,7 +1993,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_mu( @@ -2002,7 +2002,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgatherei16_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t 
op2, size_t vl) { - return vrgatherei16_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_mu( @@ -2011,7 +2011,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_mu( @@ -2020,7 +2020,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_mu( @@ -2029,7 +2029,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_mu( @@ -2038,7 +2038,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m2_mu(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_mu( @@ -2047,7 +2047,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_mu( @@ -2056,6 +2056,6 @@ vuint64m4_t test_vrgatherei16_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrgatherei16_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrsub.c index c8b3956b8a95..6465e307f3cf 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrsub.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrsub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vrsub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_tu( @@ -30,7 +30,7 @@ vint8mf4_t test_vrsub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrsub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_tu( @@ -39,7 +39,7 @@ vint8mf2_t test_vrsub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_tu( @@ -48,7 +48,7 @@ vint8m1_t test_vrsub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_tu( @@ -57,7 +57,7 @@ vint8m2_t test_vrsub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_tu( @@ -66,7 +66,7 @@ vint8m4_t test_vrsub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_tu( @@ -75,7 +75,7 
@@ vint8m8_t test_vrsub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_tu( @@ -84,7 +84,7 @@ vint16mf4_t test_vrsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_tu( @@ -93,7 +93,7 @@ vint16mf2_t test_vrsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_tu( @@ -102,7 +102,7 @@ vint16m1_t test_vrsub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_tu( @@ -111,7 +111,7 @@ vint16m2_t test_vrsub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_tu( @@ -120,7 +120,7 @@ 
vint16m4_t test_vrsub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tu( @@ -129,7 +129,7 @@ vint16m8_t test_vrsub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_tu( @@ -138,7 +138,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrsub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_tu( @@ -147,7 +147,7 @@ vint32m1_t test_vrsub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_tu( @@ -156,7 +156,7 @@ vint32m2_t test_vrsub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_tu( @@ -165,7 +165,7 @@ vint32m4_t 
test_vrsub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_tu( @@ -174,7 +174,7 @@ vint32m8_t test_vrsub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_tu( @@ -183,7 +183,7 @@ vint64m1_t test_vrsub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_tu( @@ -192,7 +192,7 @@ vint64m2_t test_vrsub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_tu( @@ -201,7 +201,7 @@ vint64m4_t test_vrsub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_tu( @@ -210,7 +210,7 @@ vint64m8_t 
test_vrsub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_tu( @@ -219,7 +219,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_tu( @@ -228,7 +228,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_tu( @@ -237,7 +237,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_tu( @@ -246,7 +246,7 @@ vuint8m1_t test_vrsub_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_tu( @@ -255,7 +255,7 @@ vuint8m2_t 
test_vrsub_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_tu( @@ -264,7 +264,7 @@ vuint8m4_t test_vrsub_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_tu( @@ -273,7 +273,7 @@ vuint8m8_t test_vrsub_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_tu( @@ -282,7 +282,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_tu( @@ -291,7 +291,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_tu( @@ -300,7 +300,7 @@ 
vuint16m1_t test_vrsub_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_tu( @@ -309,7 +309,7 @@ vuint16m2_t test_vrsub_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_tu( @@ -318,7 +318,7 @@ vuint16m4_t test_vrsub_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tu( @@ -327,7 +327,7 @@ vuint16m8_t test_vrsub_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_tu( @@ -336,7 +336,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_tu( @@ 
-345,7 +345,7 @@ vuint32m1_t test_vrsub_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_tu( @@ -354,7 +354,7 @@ vuint32m2_t test_vrsub_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_tu( @@ -363,7 +363,7 @@ vuint32m4_t test_vrsub_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrsub_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_tu( @@ -372,7 +372,7 @@ vuint32m8_t test_vrsub_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrsub_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_tu( @@ -381,7 +381,7 @@ vuint64m1_t test_vrsub_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrsub_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrsub_vx_u64m4_tu( @@ -390,7 +390,7 @@ vuint64m2_t test_vrsub_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrsub_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m4_t test_vrsub_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrsub_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t test_vrsub_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vrsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_tum( @@ -426,7 +426,7 @@ vint8mf4_t test_vrsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + 
return __riscv_vrsub_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_tum( @@ -435,7 +435,7 @@ vint8mf2_t test_vrsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_tum( @@ -444,7 +444,7 @@ vint8m1_t test_vrsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_tum( @@ -453,7 +453,7 @@ vint8m2_t test_vrsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_tum( @@ -462,7 +462,7 @@ vint8m4_t test_vrsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_tum( @@ -471,7 +471,7 @@ vint8m8_t test_vrsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t 
maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_tum( @@ -480,7 +480,7 @@ vint16mf4_t test_vrsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_tum( @@ -489,7 +489,7 @@ vint16mf2_t test_vrsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_tum( @@ -498,7 +498,7 @@ vint16m1_t test_vrsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_tum( @@ -507,7 +507,7 @@ vint16m2_t test_vrsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_tum( @@ -516,7 +516,7 @@ vint16m4_t 
test_vrsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tum( @@ -525,7 +525,7 @@ vint16m8_t test_vrsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_tum( @@ -534,7 +534,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_tum( @@ -543,7 +543,7 @@ vint32m1_t test_vrsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_tum( @@ -552,7 +552,7 @@ vint32m2_t test_vrsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m4_tum(mask, maskedoff, op1, op2, 
vl); + return __riscv_vrsub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_tum( @@ -561,7 +561,7 @@ vint32m4_t test_vrsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_tum( @@ -570,7 +570,7 @@ vint32m8_t test_vrsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_tum( @@ -579,7 +579,7 @@ vint64m1_t test_vrsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_tum( @@ -588,7 +588,7 @@ vint64m2_t test_vrsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_tum( @@ -597,7 +597,7 @@ vint64m4_t test_vrsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vrsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_tum( @@ -606,7 +606,7 @@ vint64m8_t test_vrsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_tum( @@ -615,7 +615,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_tum( @@ -624,7 +624,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_tum( @@ -633,7 +633,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_tum( 
@@ -642,7 +642,7 @@ vuint8m1_t test_vrsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_tum( @@ -651,7 +651,7 @@ vuint8m2_t test_vrsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_tum( @@ -660,7 +660,7 @@ vuint8m4_t test_vrsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_tum( @@ -669,7 +669,7 @@ vuint8m8_t test_vrsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_tum( @@ -678,7 +678,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return 
vrsub_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_tum( @@ -687,7 +687,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_tum( @@ -696,7 +696,7 @@ vuint16m1_t test_vrsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_tum( @@ -705,7 +705,7 @@ vuint16m2_t test_vrsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_tum( @@ -714,7 +714,7 @@ vuint16m4_t test_vrsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tum( @@ -723,7 +723,7 @@ vuint16m8_t test_vrsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, 
vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_tum( @@ -732,7 +732,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_tum( @@ -741,7 +741,7 @@ vuint32m1_t test_vrsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_tum( @@ -750,7 +750,7 @@ vuint32m2_t test_vrsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_tum( @@ -759,7 +759,7 @@ vuint32m4_t test_vrsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m8_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_tum( @@ -768,7 +768,7 @@ vuint32m8_t test_vrsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_tum( @@ -777,7 +777,7 @@ vuint64m1_t test_vrsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_tum( @@ -786,7 +786,7 @@ vuint64m2_t test_vrsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m4_t test_vrsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vrsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrsub_vx_i8mf8_tumu(vbool64_t mask, 
vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vrsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_tumu( @@ -822,7 +822,7 @@ vint8mf4_t test_vrsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_tumu( @@ -831,7 +831,7 @@ vint8mf2_t test_vrsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_tumu( @@ -840,7 +840,7 @@ vint8m1_t test_vrsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_tumu( @@ -849,7 +849,7 @@ vint8m2_t 
test_vrsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_tumu( @@ -858,7 +858,7 @@ vint8m4_t test_vrsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_tumu( @@ -867,7 +867,7 @@ vint8m8_t test_vrsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_tumu( @@ -876,7 +876,7 @@ vint16mf4_t test_vrsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_tumu( @@ -885,7 +885,7 @@ vint16mf2_t test_vrsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m1_tumu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vrsub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_tumu( @@ -894,7 +894,7 @@ vint16m1_t test_vrsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_tumu( @@ -903,7 +903,7 @@ vint16m2_t test_vrsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_tumu( @@ -912,7 +912,7 @@ vint16m4_t test_vrsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_tumu( @@ -921,7 +921,7 @@ vint16m8_t test_vrsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_tumu( @@ -930,7 +930,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint32m1_t test_vrsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_tumu( @@ -939,7 +939,7 @@ vint32m1_t test_vrsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_tumu( @@ -948,7 +948,7 @@ vint32m2_t test_vrsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_tumu( @@ -957,7 +957,7 @@ vint32m4_t test_vrsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_tumu( @@ -966,7 +966,7 @@ vint32m8_t test_vrsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_tumu( @@ -975,7 +975,7 @@ vint64m1_t test_vrsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_tumu( @@ -984,7 +984,7 @@ vint64m2_t test_vrsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_tumu( @@ -993,7 +993,7 @@ vint64m4_t test_vrsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_tumu( @@ -1002,7 +1002,7 @@ vint64m8_t test_vrsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_tumu( @@ -1011,7 +1011,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, 
vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_tumu( @@ -1020,7 +1020,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_tumu( @@ -1029,7 +1029,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_tumu( @@ -1038,7 +1038,7 @@ vuint8m1_t test_vrsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_tumu( @@ -1047,7 +1047,7 @@ vuint8m2_t test_vrsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_tumu( @@ -1056,7 +1056,7 @@ vuint8m4_t 
test_vrsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_tumu( @@ -1065,7 +1065,7 @@ vuint8m8_t test_vrsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_tumu( @@ -1074,7 +1074,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_tumu( @@ -1083,7 +1083,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_tumu( @@ -1092,7 +1092,7 @@ vuint16m1_t test_vrsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return 
vrsub_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_tumu( @@ -1101,7 +1101,7 @@ vuint16m2_t test_vrsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_tumu( @@ -1110,7 +1110,7 @@ vuint16m4_t test_vrsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_tumu( @@ -1119,7 +1119,7 @@ vuint16m8_t test_vrsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_tumu( @@ -1128,7 +1128,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_tumu( @@ -1137,7 +1137,7 @@ vuint32m1_t 
test_vrsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_tumu( @@ -1146,7 +1146,7 @@ vuint32m2_t test_vrsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_tumu( @@ -1155,7 +1155,7 @@ vuint32m4_t test_vrsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_tumu( @@ -1164,7 +1164,7 @@ vuint32m8_t test_vrsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_tumu( @@ -1173,7 +1173,7 @@ vuint64m1_t test_vrsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return 
vrsub_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_tumu( @@ -1182,7 +1182,7 @@ vuint64m2_t test_vrsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m4_t test_vrsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t test_vrsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vrsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_mu( @@ -1218,7 +1218,7 @@ vint8mf4_t test_vrsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, 
vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_mu( @@ -1227,7 +1227,7 @@ vint8mf2_t test_vrsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_mu( @@ -1236,7 +1236,7 @@ vint8m1_t test_vrsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_mu( @@ -1245,7 +1245,7 @@ vint8m2_t test_vrsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_mu( @@ -1254,7 +1254,7 @@ vint8m4_t test_vrsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vrsub_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vrsub_vx_i16mf4_mu( @@ -1263,7 +1263,7 @@ vint8m8_t test_vrsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_mu( @@ -1272,7 +1272,7 @@ vint16mf4_t test_vrsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_mu( @@ -1281,7 +1281,7 @@ vint16mf2_t test_vrsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_mu( @@ -1290,7 +1290,7 @@ vint16m1_t test_vrsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_mu( @@ -1299,7 +1299,7 @@ vint16m2_t test_vrsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - 
return vrsub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_mu( @@ -1308,7 +1308,7 @@ vint16m4_t test_vrsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vrsub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_mu( @@ -1317,7 +1317,7 @@ vint16m8_t test_vrsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_mu( @@ -1326,7 +1326,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_mu( @@ -1335,7 +1335,7 @@ vint32m1_t test_vrsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_mu( @@ -1344,7 +1344,7 @@ vint32m2_t test_vrsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_mu( @@ -1353,7 +1353,7 @@ vint32m4_t test_vrsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vrsub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_mu( @@ -1362,7 +1362,7 @@ vint32m8_t test_vrsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_mu( @@ -1371,7 +1371,7 @@ vint64m1_t test_vrsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_mu( @@ -1380,7 +1380,7 @@ vint64m2_t test_vrsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_mu( @@ -1389,7 +1389,7 @@ vint64m4_t test_vrsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vrsub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_mu( @@ -1398,7 +1398,7 @@ vint64m8_t test_vrsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_mu( @@ -1407,7 +1407,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_mu( @@ -1416,7 +1416,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_mu( @@ -1425,7 +1425,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, 
size_t vl) { - return vrsub_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_mu( @@ -1434,7 +1434,7 @@ vuint8m1_t test_vrsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_mu( @@ -1443,7 +1443,7 @@ vuint8m2_t test_vrsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_mu( @@ -1452,7 +1452,7 @@ vuint8m4_t test_vrsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vrsub_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_mu( @@ -1461,7 +1461,7 @@ vuint8m8_t test_vrsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_mu( @@ -1470,7 +1470,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_mu( @@ -1479,7 +1479,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_mu( @@ -1488,7 +1488,7 @@ vuint16m1_t test_vrsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_mu( @@ -1497,7 +1497,7 @@ vuint16m2_t test_vrsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_mu( @@ -1506,7 +1506,7 @@ vuint16m4_t test_vrsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vrsub_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u16m8_mu(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_mu( @@ -1515,7 +1515,7 @@ vuint16m8_t test_vrsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_mu( @@ -1524,7 +1524,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_mu( @@ -1533,7 +1533,7 @@ vuint32m1_t test_vrsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_mu( @@ -1542,7 +1542,7 @@ vuint32m2_t test_vrsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_mu( @@ -1551,7 +1551,7 @@ vuint32m4_t test_vrsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, 
vuint32m8_t op1, uint32_t op2, size_t vl) { - return vrsub_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_mu( @@ -1560,7 +1560,7 @@ vuint32m8_t test_vrsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_mu( @@ -1569,7 +1569,7 @@ vuint64m1_t test_vrsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_mu( @@ -1578,7 +1578,7 @@ vuint64m2_t test_vrsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m4_t test_vrsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrsub_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vrsub_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vrsub_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsadd.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsadd.c index be64e2ea6945..bd523a9820a9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsadd.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsadd_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vsadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vsadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsadd_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vsadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vsadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_tu(vint8mf2_t maskedoff, 
vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsadd_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vsadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vsadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsadd_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vsadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vsadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsadd_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vsadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - 
return vsadd_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vsadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsadd_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vsadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vsadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsadd_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vsadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vsadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsadd_vv_i16mf4_tu(maskedoff, 
op1, op2, vl); + return __riscv_vsadd_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vsadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vsadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsadd_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vsadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vsadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsadd_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vsadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return 
vsadd_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vsadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsadd_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vsadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vsadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsadd_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vsadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vsadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return 
vsadd_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vsadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vsadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsadd_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsadd_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vsadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - 
return vsadd_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vsadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsadd_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vsadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vsadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsadd_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vsadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vsadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return 
vsadd_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vsadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vsadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsadd_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vsadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vsadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsadd_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vsadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return 
vsadd_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vsadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsadd_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vsadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vsadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsadd_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vsadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_tum( @@ -408,7 +408,7 @@ vint64m8_t test_vsadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { 
- return vsadd_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vsadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_tum( @@ -426,7 +426,7 @@ vint8mf8_t test_vsadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsadd_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_tum( @@ -435,7 +435,7 @@ vint8mf4_t test_vsadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_tum( @@ -444,7 +444,7 @@ vint8mf4_t test_vsadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsadd_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_tum( @@ -453,7 +453,7 @@ vint8mf2_t test_vsadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_tum( @@ -462,7 +462,7 @@ vint8mf2_t test_vsadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsadd_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vsadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_tum( @@ -480,7 +480,7 @@ vint8m1_t test_vsadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsadd_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_tum( @@ -489,7 +489,7 @@ vint8m2_t test_vsadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsadd_vv_i8m4_tum( @@ -498,7 +498,7 @@ vint8m2_t test_vsadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsadd_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_tum( @@ -507,7 +507,7 @@ vint8m4_t test_vsadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_tum( @@ -516,7 +516,7 @@ vint8m4_t test_vsadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsadd_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_tum( @@ -525,7 +525,7 @@ vint8m8_t test_vsadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_tum( @@ -534,7 +534,7 @@ vint8m8_t test_vsadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return 
vsadd_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_tum( @@ -543,7 +543,7 @@ vint16mf4_t test_vsadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_tum( @@ -552,7 +552,7 @@ vint16mf4_t test_vsadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_tum( @@ -561,7 +561,7 @@ vint16mf2_t test_vsadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_tum( @@ -570,7 +570,7 @@ vint16mf2_t test_vsadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsadd_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_tum( @@ -579,7 +579,7 @@ vint16m1_t test_vsadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t 
maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_tum( @@ -588,7 +588,7 @@ vint16m1_t test_vsadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsadd_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_tum( @@ -597,7 +597,7 @@ vint16m2_t test_vsadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_tum( @@ -606,7 +606,7 @@ vint16m2_t test_vsadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsadd_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_tum( @@ -615,7 +615,7 @@ vint16m4_t test_vsadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m4_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_tum( @@ -624,7 +624,7 @@ vint16m4_t test_vsadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsadd_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_tum( @@ -633,7 +633,7 @@ vint16m8_t test_vsadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tum( @@ -642,7 +642,7 @@ vint16m8_t test_vsadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tum( @@ -651,7 +651,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_tum( @@ -660,7 +660,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_tum(vbool32_t mask, 
vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsadd_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_tum( @@ -669,7 +669,7 @@ vint32m1_t test_vsadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_tum( @@ -678,7 +678,7 @@ vint32m1_t test_vsadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsadd_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_tum( @@ -687,7 +687,7 @@ vint32m2_t test_vsadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_tum( @@ -696,7 +696,7 @@ vint32m2_t test_vsadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsadd_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_tum( @@ -705,7 +705,7 @@ vint32m4_t 
test_vsadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_tum( @@ -714,7 +714,7 @@ vint32m4_t test_vsadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsadd_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_tum( @@ -723,7 +723,7 @@ vint32m8_t test_vsadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_tum( @@ -732,7 +732,7 @@ vint32m8_t test_vsadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsadd_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_tum( @@ -741,7 +741,7 @@ vint64m1_t test_vsadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); 
+ return __riscv_vsadd_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_tum( @@ -750,7 +750,7 @@ vint64m1_t test_vsadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsadd_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_tum( @@ -759,7 +759,7 @@ vint64m2_t test_vsadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_tum( @@ -768,7 +768,7 @@ vint64m2_t test_vsadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsadd_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_tum( @@ -777,7 +777,7 @@ vint64m4_t test_vsadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_tum( @@ -786,7 +786,7 @@ vint64m4_t test_vsadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vsadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsadd_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_tum( @@ -795,7 +795,7 @@ vint64m8_t test_vsadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_tumu( @@ -804,7 +804,7 @@ vint64m8_t test_vsadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsadd_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vsadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_tumu( @@ -822,7 +822,7 @@ vint8mf8_t test_vsadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsadd_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsadd_vx_i8mf4_tumu( @@ -831,7 +831,7 @@ vint8mf4_t test_vsadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_tumu( @@ -840,7 +840,7 @@ vint8mf4_t test_vsadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsadd_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_tumu( @@ -849,7 +849,7 @@ vint8mf2_t test_vsadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_tumu( @@ -858,7 +858,7 @@ vint8mf2_t test_vsadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsadd_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_tumu( @@ -867,7 +867,7 @@ vint8m1_t test_vsadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { 
- return vsadd_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_tumu( @@ -876,7 +876,7 @@ vint8m1_t test_vsadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsadd_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_tumu( @@ -885,7 +885,7 @@ vint8m2_t test_vsadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_tumu( @@ -894,7 +894,7 @@ vint8m2_t test_vsadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsadd_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_tumu( @@ -903,7 +903,7 @@ vint8m4_t test_vsadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_tumu( @@ -912,7 +912,7 @@ vint8m4_t test_vsadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsadd_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_tumu( @@ -921,7 +921,7 @@ vint8m8_t test_vsadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_tumu( @@ -930,7 +930,7 @@ vint8m8_t test_vsadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_tumu( @@ -939,7 +939,7 @@ vint16mf4_t test_vsadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_tumu( @@ -948,7 +948,7 @@ vint16mf4_t test_vsadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_tumu( @@ -957,7 +957,7 @@ vint16mf2_t test_vsadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_tumu( @@ -966,7 +966,7 @@ vint16mf2_t test_vsadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_tumu( @@ -975,7 +975,7 @@ vint16m1_t test_vsadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_tumu( @@ -984,7 +984,7 @@ vint16m1_t test_vsadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_tumu( @@ -993,7 +993,7 @@ vint16m2_t test_vsadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t 
maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_tumu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vsadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_tumu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vsadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_tumu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vsadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_tumu( @@ -1029,7 +1029,7 @@ vint16m8_t test_vsadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_tumu( @@ -1038,7 +1038,7 @@ 
vint16m8_t test_vsadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_tumu( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_tumu( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_tumu( @@ -1065,7 +1065,7 @@ vint32m1_t test_vsadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_tumu( @@ -1074,7 +1074,7 @@ vint32m1_t test_vsadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - 
return vsadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_tumu( @@ -1083,7 +1083,7 @@ vint32m2_t test_vsadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_tumu( @@ -1092,7 +1092,7 @@ vint32m2_t test_vsadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_tumu( @@ -1101,7 +1101,7 @@ vint32m4_t test_vsadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_tumu( @@ -1110,7 +1110,7 @@ vint32m4_t test_vsadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_tumu( @@ -1119,7 +1119,7 @@ vint32m8_t test_vsadd_vv_i32m8_tumu(vbool4_t mask, 
vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_tumu( @@ -1128,7 +1128,7 @@ vint32m8_t test_vsadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_tumu( @@ -1137,7 +1137,7 @@ vint64m1_t test_vsadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_tumu( @@ -1146,7 +1146,7 @@ vint64m1_t test_vsadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_tumu( @@ -1155,7 +1155,7 @@ vint64m2_t test_vsadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_tumu( @@ -1164,7 +1164,7 @@ vint64m2_t test_vsadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsadd_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_tumu( @@ -1173,7 +1173,7 @@ vint64m4_t test_vsadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_tumu( @@ -1182,7 +1182,7 @@ vint64m4_t test_vsadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_tumu( @@ -1191,7 +1191,7 @@ vint64m8_t test_vsadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_mu( @@ -1200,7 +1200,7 @@ vint64m8_t test_vsadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8mf8_t test_vsadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsadd_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vsadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_mu( @@ -1218,7 +1218,7 @@ vint8mf8_t test_vsadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsadd_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_mu( @@ -1227,7 +1227,7 @@ vint8mf4_t test_vsadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_mu( @@ -1236,7 +1236,7 @@ vint8mf4_t test_vsadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsadd_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsadd_vx_i8mf2_mu( @@ -1245,7 +1245,7 @@ vint8mf2_t test_vsadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_mu( @@ -1254,7 +1254,7 @@ vint8mf2_t test_vsadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsadd_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_mu( @@ -1263,7 +1263,7 @@ vint8m1_t test_vsadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_mu( @@ -1272,7 +1272,7 @@ vint8m1_t test_vsadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsadd_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_mu( @@ -1281,7 +1281,7 @@ vint8m2_t test_vsadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m2_mu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vsadd_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_mu( @@ -1290,7 +1290,7 @@ vint8m2_t test_vsadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsadd_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_mu( @@ -1299,7 +1299,7 @@ vint8m4_t test_vsadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_mu( @@ -1308,7 +1308,7 @@ vint8m4_t test_vsadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsadd_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_mu( @@ -1317,7 +1317,7 @@ vint8m8_t test_vsadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsadd_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_mu( @@ -1326,7 +1326,7 @@ vint8m8_t test_vsadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_mu(vbool64_t mask, 
vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_mu( @@ -1335,7 +1335,7 @@ vint16mf4_t test_vsadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_mu( @@ -1344,7 +1344,7 @@ vint16mf4_t test_vsadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_mu( @@ -1353,7 +1353,7 @@ vint16mf2_t test_vsadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_mu( @@ -1362,7 +1362,7 @@ vint16mf2_t test_vsadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsadd_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_mu( @@ -1371,7 
+1371,7 @@ vint16m1_t test_vsadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_mu( @@ -1380,7 +1380,7 @@ vint16m1_t test_vsadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsadd_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_mu( @@ -1389,7 +1389,7 @@ vint16m2_t test_vsadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_mu( @@ -1398,7 +1398,7 @@ vint16m2_t test_vsadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsadd_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_mu( @@ -1407,7 +1407,7 @@ vint16m4_t test_vsadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m4_mu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vsadd_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_mu( @@ -1416,7 +1416,7 @@ vint16m4_t test_vsadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_mu( @@ -1425,7 +1425,7 @@ vint16m8_t test_vsadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsadd_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_mu( @@ -1434,7 +1434,7 @@ vint16m8_t test_vsadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_mu( @@ -1443,7 +1443,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_mu( @@ -1452,7 +1452,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m1_t test_vsadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsadd_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_mu( @@ -1461,7 +1461,7 @@ vint32m1_t test_vsadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_mu( @@ -1470,7 +1470,7 @@ vint32m1_t test_vsadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsadd_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_mu( @@ -1479,7 +1479,7 @@ vint32m2_t test_vsadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_mu( @@ -1488,7 +1488,7 @@ vint32m2_t test_vsadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsadd_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsadd_vx_i32m4_mu( @@ -1497,7 +1497,7 @@ vint32m4_t test_vsadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_mu( @@ -1506,7 +1506,7 @@ vint32m4_t test_vsadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_mu( @@ -1515,7 +1515,7 @@ vint32m8_t test_vsadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsadd_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_mu( @@ -1524,7 +1524,7 @@ vint32m8_t test_vsadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsadd_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_mu( @@ -1533,7 +1533,7 @@ vint64m1_t test_vsadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return 
vsadd_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_mu( @@ -1542,7 +1542,7 @@ vint64m1_t test_vsadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsadd_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_mu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vsadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_mu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vsadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsadd_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_mu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vsadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_mu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vsadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_mu( @@ -1587,6 +1587,6 @@ vint64m8_t test_vsadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsadd_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsadd_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsaddu.c index 68e2dc78e619..de82cf2d4de6 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsaddu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsaddu_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_tu( @@ -30,7 +30,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vsaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsaddu_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_tu( @@ -39,7 +39,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_tu( @@ -48,7 +48,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsaddu_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_tu( @@ -57,7 +57,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_tu( @@ -66,7 +66,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsaddu_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vsaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint8m1_t test_vsaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_tu( @@ -84,7 +84,7 @@ vuint8m1_t test_vsaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsaddu_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_tu( @@ -93,7 +93,7 @@ vuint8m2_t test_vsaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_tu( @@ -102,7 +102,7 @@ vuint8m2_t test_vsaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsaddu_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_tu( @@ -111,7 +111,7 @@ vuint8m4_t test_vsaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vsaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vsaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsaddu_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_tu( @@ -129,7 +129,7 @@ vuint8m8_t test_vsaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_tu( @@ -138,7 +138,7 @@ vuint8m8_t test_vsaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsaddu_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_tu( @@ -147,7 +147,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_tu( @@ -156,7 +156,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsaddu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_tu( @@ -165,7 +165,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_tu( @@ -174,7 +174,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsaddu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_tu( @@ -183,7 +183,7 @@ vuint16m1_t test_vsaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_tu( @@ -192,7 +192,7 @@ vuint16m1_t test_vsaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsaddu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_tu( @@ -201,7 +201,7 @@ vuint16m2_t test_vsaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_tu( @@ -210,7 +210,7 @@ vuint16m2_t test_vsaddu_vx_u16m2_tu(vuint16m2_t 
maskedoff, vuint16m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsaddu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_tu( @@ -219,7 +219,7 @@ vuint16m4_t test_vsaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_tu( @@ -228,7 +228,7 @@ vuint16m4_t test_vsaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsaddu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_tu( @@ -237,7 +237,7 @@ vuint16m8_t test_vsaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tu( @@ -246,7 +246,7 @@ vuint16m8_t test_vsaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsaddu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tu( @@ -255,7 +255,7 @@ 
vuint32mf2_t test_vsaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_tu( @@ -264,7 +264,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsaddu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_tu( @@ -273,7 +273,7 @@ vuint32m1_t test_vsaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_tu( @@ -282,7 +282,7 @@ vuint32m1_t test_vsaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsaddu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_tu( @@ -291,7 +291,7 @@ vuint32m2_t test_vsaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsaddu_vv_u32m4_tu( @@ -300,7 +300,7 @@ vuint32m2_t test_vsaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsaddu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_tu( @@ -309,7 +309,7 @@ vuint32m4_t test_vsaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_tu( @@ -318,7 +318,7 @@ vuint32m4_t test_vsaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsaddu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_tu( @@ -327,7 +327,7 @@ vuint32m8_t test_vsaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_tu( @@ -336,7 +336,7 @@ vuint32m8_t test_vsaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsaddu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m1_tu(maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_tu( @@ -345,7 +345,7 @@ vuint64m1_t test_vsaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_tu( @@ -354,7 +354,7 @@ vuint64m1_t test_vsaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsaddu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_tu( @@ -363,7 +363,7 @@ vuint64m2_t test_vsaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_tu( @@ -372,7 +372,7 @@ vuint64m2_t test_vsaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsaddu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_tu( @@ -381,7 +381,7 @@ vuint64m4_t test_vsaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return 
__riscv_vsaddu_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_tu( @@ -390,7 +390,7 @@ vuint64m4_t test_vsaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsaddu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m8_t test_vsaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t test_vsaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsaddu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_tum( @@ -417,7 +417,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_tum( @@ -426,7 +426,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t 
maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsaddu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_tum( @@ -435,7 +435,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_tum( @@ -444,7 +444,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsaddu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_tum( @@ -453,7 +453,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_tum( @@ -462,7 +462,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsaddu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_tum( @@ -471,7 +471,7 @@ 
vuint8m1_t test_vsaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_tum( @@ -480,7 +480,7 @@ vuint8m1_t test_vsaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsaddu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_tum( @@ -489,7 +489,7 @@ vuint8m2_t test_vsaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_tum( @@ -498,7 +498,7 @@ vuint8m2_t test_vsaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsaddu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_tum( @@ -507,7 +507,7 @@ vuint8m4_t test_vsaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m4_tum(mask, maskedoff, op1, 
op2, vl); + return __riscv_vsaddu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_tum( @@ -516,7 +516,7 @@ vuint8m4_t test_vsaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsaddu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_tum( @@ -525,7 +525,7 @@ vuint8m8_t test_vsaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_tum( @@ -534,7 +534,7 @@ vuint8m8_t test_vsaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsaddu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_tum( @@ -543,7 +543,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_tum( @@ -552,7 +552,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsaddu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_tum( @@ -561,7 +561,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_tum( @@ -570,7 +570,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsaddu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_tum( @@ -579,7 +579,7 @@ vuint16m1_t test_vsaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_tum( @@ -588,7 +588,7 @@ vuint16m1_t test_vsaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsaddu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsaddu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_tum( @@ -597,7 +597,7 @@ vuint16m2_t test_vsaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_tum( @@ -606,7 +606,7 @@ vuint16m2_t test_vsaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsaddu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_tum( @@ -615,7 +615,7 @@ vuint16m4_t test_vsaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_tum( @@ -624,7 +624,7 @@ vuint16m4_t test_vsaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsaddu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_tum( @@ -633,7 +633,7 @@ vuint16m8_t test_vsaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m8_t test_vsaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tum( @@ -642,7 +642,7 @@ vuint16m8_t test_vsaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsaddu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tum( @@ -651,7 +651,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_tum( @@ -660,7 +660,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsaddu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_tum( @@ -669,7 +669,7 @@ vuint32m1_t test_vsaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m1_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_tum( @@ -678,7 +678,7 @@ vuint32m1_t test_vsaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsaddu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_tum( @@ -687,7 +687,7 @@ vuint32m2_t test_vsaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_tum( @@ -696,7 +696,7 @@ vuint32m2_t test_vsaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsaddu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_tum( @@ -705,7 +705,7 @@ vuint32m4_t test_vsaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_tum( @@ -714,7 +714,7 @@ vuint32m4_t test_vsaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t 
test_vsaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsaddu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_tum( @@ -723,7 +723,7 @@ vuint32m8_t test_vsaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_tum( @@ -732,7 +732,7 @@ vuint32m8_t test_vsaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsaddu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_tum( @@ -741,7 +741,7 @@ vuint64m1_t test_vsaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_tum( @@ -750,7 +750,7 @@ vuint64m1_t test_vsaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsaddu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_tum( @@ -759,7 +759,7 @@ vuint64m2_t test_vsaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_tum( @@ -768,7 +768,7 @@ vuint64m2_t test_vsaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsaddu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_tum( @@ -777,7 +777,7 @@ vuint64m4_t test_vsaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_tum( @@ -786,7 +786,7 @@ vuint64m4_t test_vsaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsaddu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m8_t test_vsaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t 
maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vsaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsaddu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_tumu( @@ -813,7 +813,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_tumu( @@ -822,7 +822,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsaddu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_tumu( @@ -831,7 +831,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsaddu_vv_u8mf2_tumu( @@ -840,7 +840,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsaddu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_tumu( @@ -849,7 +849,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_tumu( @@ -858,7 +858,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsaddu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_tumu( @@ -867,7 +867,7 @@ vuint8m1_t test_vsaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_tumu( @@ -876,7 +876,7 @@ vuint8m1_t test_vsaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t 
op1, vuint8m2_t op2, size_t vl) { - return vsaddu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_tumu( @@ -885,7 +885,7 @@ vuint8m2_t test_vsaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_tumu( @@ -894,7 +894,7 @@ vuint8m2_t test_vsaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsaddu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_tumu( @@ -903,7 +903,7 @@ vuint8m4_t test_vsaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_tumu( @@ -912,7 +912,7 @@ vuint8m4_t test_vsaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsaddu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_tumu( @@ -921,7 +921,7 @@ vuint8m8_t 
test_vsaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_tumu( @@ -930,7 +930,7 @@ vuint8m8_t test_vsaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsaddu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_tumu( @@ -939,7 +939,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_tumu( @@ -948,7 +948,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsaddu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_tumu( @@ -957,7 +957,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t 
op2, size_t vl) { - return vsaddu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_tumu( @@ -966,7 +966,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsaddu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_tumu( @@ -975,7 +975,7 @@ vuint16m1_t test_vsaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_tumu( @@ -984,7 +984,7 @@ vuint16m1_t test_vsaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsaddu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_tumu( @@ -993,7 +993,7 @@ vuint16m2_t test_vsaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_tumu( @@ -1002,7 +1002,7 @@ 
vuint16m2_t test_vsaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsaddu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_tumu( @@ -1011,7 +1011,7 @@ vuint16m4_t test_vsaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_tumu( @@ -1020,7 +1020,7 @@ vuint16m4_t test_vsaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsaddu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_tumu( @@ -1029,7 +1029,7 @@ vuint16m8_t test_vsaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_tumu( @@ -1038,7 +1038,7 @@ vuint16m8_t test_vsaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, 
vuint32mf2_t op2, size_t vl) { - return vsaddu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_tumu( @@ -1047,7 +1047,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_tumu( @@ -1056,7 +1056,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsaddu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_tumu( @@ -1065,7 +1065,7 @@ vuint32m1_t test_vsaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_tumu( @@ -1074,7 +1074,7 @@ vuint32m1_t test_vsaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsaddu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_tumu( 
@@ -1083,7 +1083,7 @@ vuint32m2_t test_vsaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_tumu( @@ -1092,7 +1092,7 @@ vuint32m2_t test_vsaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsaddu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_tumu( @@ -1101,7 +1101,7 @@ vuint32m4_t test_vsaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_tumu( @@ -1110,7 +1110,7 @@ vuint32m4_t test_vsaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsaddu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_tumu( @@ -1119,7 +1119,7 @@ vuint32m8_t test_vsaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, 
vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_tumu( @@ -1128,7 +1128,7 @@ vuint32m8_t test_vsaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsaddu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_tumu( @@ -1137,7 +1137,7 @@ vuint64m1_t test_vsaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_tumu( @@ -1146,7 +1146,7 @@ vuint64m1_t test_vsaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsaddu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_tumu( @@ -1155,7 +1155,7 @@ vuint64m2_t test_vsaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsaddu_vv_u64m4_tumu( @@ -1164,7 +1164,7 @@ vuint64m2_t test_vsaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsaddu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_tumu( @@ -1173,7 +1173,7 @@ vuint64m4_t test_vsaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_tumu( @@ -1182,7 +1182,7 @@ vuint64m4_t test_vsaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsaddu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m8_t test_vsaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t test_vsaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_mu(vbool64_t mask, 
vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsaddu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_mu( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_mu( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsaddu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_mu( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_mu( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsaddu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_mu( @@ 
-1245,7 +1245,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_mu( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsaddu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_mu( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vsaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_mu( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vsaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsaddu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_mu( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vsaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m2_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_mu( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vsaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsaddu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_mu( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vsaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_mu( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vsaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsaddu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_mu( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vsaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsaddu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_mu( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vsaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf4_t test_vsaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsaddu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_mu( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_mu( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsaddu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_mu( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_mu( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsaddu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m1_mu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_mu( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vsaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_mu( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vsaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsaddu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_mu( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vsaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_mu( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vsaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsaddu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_mu( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vsaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vsaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_mu( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vsaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsaddu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_mu( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vsaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsaddu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_mu( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vsaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsaddu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_mu( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_mu( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsaddu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_mu( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vsaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_mu( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vsaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsaddu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_mu( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vsaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_mu( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vsaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t 
maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsaddu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_mu( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vsaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_mu( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vsaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsaddu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_mu( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vsaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsaddu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_mu( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vsaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsaddu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_mu( @@ -1533,7 +1533,7 @@ 
vuint64m1_t test_vsaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_mu( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vsaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsaddu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_mu( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vsaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_mu( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vsaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsaddu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_mu( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vsaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return 
vsaddu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_mu( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vsaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsaddu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vsaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsaddu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsaddu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsbc.c index 4af7869eaa1c..52d91696fea9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsbc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsbc.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsbc_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_i8mf8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i8mf8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vsbc_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsbc_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl) { 
- return vsbc_vxm_i8mf8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8mf8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vsbc_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsbc_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_i8mf4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i8mf4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vsbc_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsbc_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_i8mf4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8mf4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vsbc_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsbc_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_i8mf2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i8mf2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vsbc_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsbc_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_i8mf2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8mf2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vsbc_vxm_i8mf2_tu(vint8mf2_t maskedoff, 
vint8mf2_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsbc_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_i8m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i8m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vsbc_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsbc_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_i8m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vsbc_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsbc_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vvm_i8m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i8m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vsbc_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsbc_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vxm_i8m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vsbc_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsbc_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vvm_i8m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i8m4_tu(maskedoff, op1, 
op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vsbc_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsbc_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vxm_i8m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vsbc_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsbc_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl) { - return vsbc_vvm_i8m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i8m8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vsbc_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsbc_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl) { - return vsbc_vxm_i8m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i8m8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vsbc_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsbc_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_i16mf4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i16mf4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vsbc_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsbc_vxm_i16mf4_tu(vint16mf4_t maskedoff, 
vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_i16mf4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i16mf4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vsbc_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsbc_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_i16mf2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i16mf2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vsbc_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsbc_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_i16mf2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i16mf2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vsbc_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsbc_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_i16m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i16m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vsbc_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsbc_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_i16m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i16m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: 
@test_vsbc_vvm_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vsbc_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsbc_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_i16m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i16m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vsbc_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsbc_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_i16m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i16m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vsbc_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsbc_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vvm_i16m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i16m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vsbc_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsbc_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vxm_i16m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i16m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vsbc_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsbc_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, 
vbool2_t borrowin, size_t vl) { - return vsbc_vvm_i16m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i16m8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vsbc_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsbc_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vxm_i16m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i16m8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vsbc_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsbc_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_i32mf2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i32mf2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vsbc_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsbc_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_i32mf2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i32mf2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vsbc_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsbc_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_i32m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i32m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m1_tu( @@ -273,7 +273,7 
@@ vint32m1_t test_vsbc_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsbc_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_i32m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i32m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vsbc_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsbc_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_i32m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i32m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vsbc_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsbc_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_i32m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i32m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vsbc_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsbc_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_i32m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i32m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vsbc_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsbc_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl) { - return 
vsbc_vxm_i32m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i32m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vsbc_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsbc_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vvm_i32m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i32m8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vsbc_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsbc_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vxm_i32m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i32m8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vsbc_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsbc_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_i64m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i64m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vsbc_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsbc_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_i64m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i64m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vsbc_vxm_i64m1_tu(vint64m1_t maskedoff, 
vint64m1_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsbc_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_i64m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i64m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vsbc_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsbc_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_i64m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i64m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vsbc_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsbc_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_i64m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_i64m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vsbc_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsbc_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_i64m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i64m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vsbc_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsbc_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_i64m8_tu(maskedoff, op1, op2, borrowin, vl); + 
return __riscv_vsbc_vvm_i64m8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vsbc_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsbc_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_i64m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_i64m8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf8_tu( @@ -408,7 +408,7 @@ vint64m8_t test_vsbc_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsbc_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_u8mf8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8mf8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf8_tu( @@ -417,7 +417,7 @@ vuint8mf8_t test_vsbc_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsbc_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_u8mf8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u8mf8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf4_tu( @@ -426,7 +426,7 @@ vuint8mf8_t test_vsbc_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsbc_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_u8mf4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8mf4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf4_tu( @@ -435,7 +435,7 @@ vuint8mf4_t test_vsbc_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsbc_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_u8mf4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u8mf4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf2_tu( @@ -444,7 +444,7 @@ vuint8mf4_t test_vsbc_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsbc_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_u8mf2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8mf2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf2_tu( @@ -453,7 +453,7 @@ vuint8mf2_t test_vsbc_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsbc_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_u8mf2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u8mf2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m1_tu( @@ -462,7 +462,7 @@ vuint8mf2_t test_vsbc_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsbc_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_u8m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m1_tu( @@ -471,7 +471,7 @@ vuint8m1_t test_vsbc_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsbc_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_u8m1_tu(maskedoff, op1, op2, borrowin, vl); + return 
__riscv_vsbc_vxm_u8m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m2_tu( @@ -480,7 +480,7 @@ vuint8m1_t test_vsbc_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsbc_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vvm_u8m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m2_tu( @@ -489,7 +489,7 @@ vuint8m2_t test_vsbc_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsbc_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vxm_u8m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u8m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m4_tu( @@ -498,7 +498,7 @@ vuint8m2_t test_vsbc_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsbc_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vvm_u8m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m4_tu( @@ -507,7 +507,7 @@ vuint8m4_t test_vsbc_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsbc_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vxm_u8m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u8m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m8_tu( @@ -516,7 +516,7 @@ vuint8m4_t test_vsbc_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vsbc_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl) { - return vsbc_vvm_u8m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u8m8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m8_tu( @@ -525,7 +525,7 @@ vuint8m8_t test_vsbc_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsbc_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl) { - return vsbc_vxm_u8m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u8m8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf4_tu( @@ -534,7 +534,7 @@ vuint8m8_t test_vsbc_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsbc_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_u16mf4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u16mf4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf4_tu( @@ -543,7 +543,7 @@ vuint16mf4_t test_vsbc_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsbc_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_u16mf4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u16mf4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf2_tu( @@ -552,7 +552,7 @@ vuint16mf4_t test_vsbc_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsbc_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_u16mf2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u16mf2_tu(maskedoff, 
op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf2_tu( @@ -561,7 +561,7 @@ vuint16mf2_t test_vsbc_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsbc_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_u16mf2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u16mf2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m1_tu( @@ -570,7 +570,7 @@ vuint16mf2_t test_vsbc_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsbc_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_u16m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u16m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m1_tu( @@ -579,7 +579,7 @@ vuint16m1_t test_vsbc_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsbc_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_u16m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u16m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m2_tu( @@ -588,7 +588,7 @@ vuint16m1_t test_vsbc_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsbc_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_u16m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u16m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m2_tu( @@ -597,7 +597,7 @@ vuint16m2_t test_vsbc_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vsbc_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_u16m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u16m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m4_tu( @@ -606,7 +606,7 @@ vuint16m2_t test_vsbc_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsbc_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vvm_u16m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u16m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m4_tu( @@ -615,7 +615,7 @@ vuint16m4_t test_vsbc_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsbc_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vxm_u16m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u16m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m8_tu( @@ -624,7 +624,7 @@ vuint16m4_t test_vsbc_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsbc_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vvm_u16m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u16m8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m8_tu( @@ -633,7 +633,7 @@ vuint16m8_t test_vsbc_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsbc_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl) { - return vsbc_vxm_u16m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u16m8_tu(maskedoff, op1, op2, 
borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2_tu( @@ -642,7 +642,7 @@ vuint16m8_t test_vsbc_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsbc_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_u32mf2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u32mf2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2_tu( @@ -651,7 +651,7 @@ vuint32mf2_t test_vsbc_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_u32mf2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u32mf2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m1_tu( @@ -660,7 +660,7 @@ vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsbc_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_u32m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u32m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m1_tu( @@ -669,7 +669,7 @@ vuint32m1_t test_vsbc_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsbc_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_u32m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u32m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m2_tu( @@ -678,7 +678,7 @@ vuint32m1_t test_vsbc_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vsbc_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_u32m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u32m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m2_tu( @@ -687,7 +687,7 @@ vuint32m2_t test_vsbc_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsbc_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_u32m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u32m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m4_tu( @@ -696,7 +696,7 @@ vuint32m2_t test_vsbc_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsbc_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_u32m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u32m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m4_tu( @@ -705,7 +705,7 @@ vuint32m4_t test_vsbc_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsbc_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_u32m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u32m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m8_tu( @@ -714,7 +714,7 @@ vuint32m4_t test_vsbc_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsbc_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vvm_u32m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u32m8_tu(maskedoff, op1, op2, 
borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m8_tu( @@ -723,7 +723,7 @@ vuint32m8_t test_vsbc_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsbc_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl) { - return vsbc_vxm_u32m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u32m8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m1_tu( @@ -732,7 +732,7 @@ vuint32m8_t test_vsbc_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsbc_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vvm_u64m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u64m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m1_tu( @@ -741,7 +741,7 @@ vuint64m1_t test_vsbc_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsbc_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl) { - return vsbc_vxm_u64m1_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u64m1_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m2_tu( @@ -750,7 +750,7 @@ vuint64m1_t test_vsbc_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsbc_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vvm_u64m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u64m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m2_tu( @@ -759,7 +759,7 @@ vuint64m2_t test_vsbc_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vsbc_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl) { - return vsbc_vxm_u64m2_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u64m2_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m4_tu( @@ -768,7 +768,7 @@ vuint64m2_t test_vsbc_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsbc_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vvm_u64m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u64m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m4_tu( @@ -777,7 +777,7 @@ vuint64m4_t test_vsbc_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsbc_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl) { - return vsbc_vxm_u64m4_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u64m4_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m8_tu( @@ -786,7 +786,7 @@ vuint64m4_t test_vsbc_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsbc_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vvm_u64m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vvm_u64m8_tu(maskedoff, op1, op2, borrowin, vl); } // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m8_tu( @@ -795,6 +795,6 @@ vuint64m8_t test_vsbc_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsbc_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl) { - return vsbc_vxm_u64m8_tu(maskedoff, op1, op2, borrowin, vl); + return __riscv_vsbc_vxm_u64m8_tu(maskedoff, op1, op2, 
borrowin, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext.c index 3459a2e0d6db..d92d678f36fe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsext_vf2_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf2_i16mf4_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i16mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_tu( @@ -21,7 +21,7 @@ vint16mf4_t test_vsext_vf2_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsext_vf2_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf2_i16mf2_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i16mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_tu( @@ -30,7 +30,7 @@ vint16mf2_t test_vsext_vf2_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsext_vf2_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf2_i16m1_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_tu( @@ -39,7 +39,7 @@ vint16m1_t test_vsext_vf2_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsext_vf2_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf2_i16m2_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_tu( @@ -48,7 +48,7 @@ vint16m2_t test_vsext_vf2_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vsext_vf2_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, size_t vl) { - return vsext_vf2_i16m4_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_tu( @@ -57,7 +57,7 @@ vint16m4_t test_vsext_vf2_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsext_vf2_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, size_t vl) { - return vsext_vf2_i16m8_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tu( @@ -66,7 +66,7 @@ vint16m8_t test_vsext_vf2_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf4_i32mf2_tu(maskedoff, op1, vl); + return __riscv_vsext_vf4_i32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tu( @@ -75,7 +75,7 @@ vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t maskedoff, vint8mf8_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf4_i32m1_tu(vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf4_i32m1_tu(maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tu( @@ -84,7 +84,7 @@ vint32m1_t test_vsext_vf4_i32m1_tu(vint32m1_t maskedoff, vint8mf4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf4_i32m2_tu(vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf4_i32m2_tu(maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tu( @@ -93,7 +93,7 @@ vint32m2_t test_vsext_vf4_i32m2_tu(vint32m2_t maskedoff, vint8mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf4_i32m4_tu(vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf4_i32m4_tu(maskedoff, op1, 
vl); + return __riscv_vsext_vf4_i32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tu( @@ -102,7 +102,7 @@ vint32m4_t test_vsext_vf4_i32m4_tu(vint32m4_t maskedoff, vint8m1_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf4_i32m8_tu(vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return vsext_vf4_i32m8_tu(maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tu( @@ -111,7 +111,7 @@ vint32m8_t test_vsext_vf4_i32m8_tu(vint32m8_t maskedoff, vint8m2_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf8_i64m1_tu(maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tu( @@ -120,7 +120,7 @@ vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t maskedoff, vint8mf8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf8_i64m2_tu(maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tu( @@ -129,7 +129,7 @@ vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t maskedoff, vint8mf4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf8_i64m4_tu(maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tu( @@ -138,7 +138,7 @@ vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t maskedoff, vint8mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf8_i64m8_tu(maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_tu( @@ 
-147,7 +147,7 @@ vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t maskedoff, vint8m1_t op1, size_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf2_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) { - return vsext_vf2_i32mf2_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_tu( @@ -156,7 +156,7 @@ vint32mf2_t test_vsext_vf2_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf2_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf2_i32m1_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_tu( @@ -165,7 +165,7 @@ vint32m1_t test_vsext_vf2_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf2_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf2_i32m2_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_tu( @@ -174,7 +174,7 @@ vint32m2_t test_vsext_vf2_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf2_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf2_i32m4_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_tu( @@ -183,7 +183,7 @@ vint32m4_t test_vsext_vf2_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf2_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, size_t vl) { - return vsext_vf2_i32m8_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tu( @@ -192,7 +192,7 @@ vint32m8_t test_vsext_vf2_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, size_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf4_i64m1_tu(vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { - return vsext_vf4_i64m1_tu(maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tu( @@ -201,7 +201,7 @@ vint64m1_t test_vsext_vf4_i64m1_tu(vint64m1_t maskedoff, vint16mf4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf4_i64m2_tu(vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf4_i64m2_tu(maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tu( @@ -210,7 +210,7 @@ vint64m2_t test_vsext_vf4_i64m2_tu(vint64m2_t maskedoff, vint16mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf4_i64m4_tu(vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf4_i64m4_tu(maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tu( @@ -219,7 +219,7 @@ vint64m4_t test_vsext_vf4_i64m4_tu(vint64m4_t maskedoff, vint16m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf4_i64m8_tu(vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf4_i64m8_tu(maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tu( @@ -228,7 +228,7 @@ vint64m8_t test_vsext_vf4_i64m8_tu(vint64m8_t maskedoff, vint16m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf2_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) { - return vsext_vf2_i64m1_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_tu( @@ -237,7 +237,7 @@ vint64m1_t test_vsext_vf2_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf2_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, 
size_t vl) { - return vsext_vf2_i64m2_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_tu( @@ -246,7 +246,7 @@ vint64m2_t test_vsext_vf2_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf2_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, size_t vl) { - return vsext_vf2_i64m4_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_tu( @@ -255,7 +255,7 @@ vint64m4_t test_vsext_vf2_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf2_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, size_t vl) { - return vsext_vf2_i64m8_tu(maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_tum( @@ -264,7 +264,7 @@ vint64m8_t test_vsext_vf2_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsext_vf2_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf2_i16mf4_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_tum( @@ -273,7 +273,7 @@ vint16mf4_t test_vsext_vf2_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsext_vf2_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf2_i16mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_tum( @@ -282,7 +282,7 @@ vint16mf2_t test_vsext_vf2_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsext_vf2_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) { - 
return vsext_vf2_i16m1_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_tum( @@ -291,7 +291,7 @@ vint16m1_t test_vsext_vf2_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsext_vf2_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf2_i16m2_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_tum( @@ -300,7 +300,7 @@ vint16m2_t test_vsext_vf2_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsext_vf2_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) { - return vsext_vf2_i16m4_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_tum( @@ -309,7 +309,7 @@ vint16m4_t test_vsext_vf2_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsext_vf2_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) { - return vsext_vf2_i16m8_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tum( @@ -318,7 +318,7 @@ vint16m8_t test_vsext_vf2_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf4_i32mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tum( @@ -327,7 +327,7 @@ vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t mask, 
vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf4_i32m1_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tum( @@ -336,7 +336,7 @@ vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf4_i32m2_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tum( @@ -345,7 +345,7 @@ vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf4_i32m4_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tum( @@ -354,7 +354,7 @@ vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return vsext_vf4_i32m8_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tum( @@ -363,7 +363,7 @@ vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf8_i64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tum( @@ -372,7 +372,7 @@ vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf8_i64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tum( @@ -381,7 +381,7 @@ vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf8_i64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tum( @@ -390,7 +390,7 @@ vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf8_i64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_tum( @@ -399,7 +399,7 @@ vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf2_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) { - return vsext_vf2_i32mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_tum( @@ -408,7 +408,7 @@ vint32mf2_t test_vsext_vf2_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf2_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf2_i32m1_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_tum( @@ -417,7 +417,7 @@ vint32m1_t test_vsext_vf2_i32m1_tum(vbool32_t mask, 
vint32m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf2_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf2_i32m2_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_tum( @@ -426,7 +426,7 @@ vint32m2_t test_vsext_vf2_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf2_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf2_i32m4_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_tum( @@ -435,7 +435,7 @@ vint32m4_t test_vsext_vf2_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf2_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) { - return vsext_vf2_i32m8_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tum( @@ -444,7 +444,7 @@ vint32m8_t test_vsext_vf2_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { - return vsext_vf4_i64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tum( @@ -453,7 +453,7 @@ vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf4_i64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tum( @@ -462,7 +462,7 @@ 
vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf4_i64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tum( @@ -471,7 +471,7 @@ vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf4_i64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tum( @@ -480,7 +480,7 @@ vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf2_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) { - return vsext_vf2_i64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_tum( @@ -489,7 +489,7 @@ vint64m1_t test_vsext_vf2_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf2_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) { - return vsext_vf2_i64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_tum( @@ -498,7 +498,7 @@ vint64m2_t test_vsext_vf2_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf2_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) { - return vsext_vf2_i64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m4_tum(mask, maskedoff, op1, vl); } // 
CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_tum( @@ -507,7 +507,7 @@ vint64m4_t test_vsext_vf2_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf2_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) { - return vsext_vf2_i64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_tumu( @@ -516,7 +516,7 @@ vint64m8_t test_vsext_vf2_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsext_vf2_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf2_i16mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_tumu( @@ -525,7 +525,7 @@ vint16mf4_t test_vsext_vf2_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsext_vf2_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf2_i16mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_tumu( @@ -534,7 +534,7 @@ vint16mf2_t test_vsext_vf2_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsext_vf2_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf2_i16m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_tumu( @@ -543,7 +543,7 @@ vint16m1_t test_vsext_vf2_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsext_vf2_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf2_i16m2_tumu(mask, maskedoff, op1, 
vl); + return __riscv_vsext_vf2_i16m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_tumu( @@ -552,7 +552,7 @@ vint16m2_t test_vsext_vf2_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsext_vf2_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) { - return vsext_vf2_i16m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_tumu( @@ -561,7 +561,7 @@ vint16m4_t test_vsext_vf2_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsext_vf2_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) { - return vsext_vf2_i16m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tumu( @@ -570,7 +570,7 @@ vint16m8_t test_vsext_vf2_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf4_i32mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tumu( @@ -579,7 +579,7 @@ vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf4_i32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tumu( @@ -588,7 +588,7 @@ vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, 
vint8mf2_t op1, size_t vl) { - return vsext_vf4_i32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tumu( @@ -597,7 +597,7 @@ vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf4_i32m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tumu( @@ -606,7 +606,7 @@ vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return vsext_vf4_i32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tumu( @@ -615,7 +615,7 @@ vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf8_i64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tumu( @@ -624,7 +624,7 @@ vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf8_i64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tumu( @@ -633,7 +633,7 @@ vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf8_i64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tumu( @@ -642,7 +642,7 @@ vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf8_i64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_tumu( @@ -651,7 +651,7 @@ vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf2_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) { - return vsext_vf2_i32mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_tumu( @@ -660,7 +660,7 @@ vint32mf2_t test_vsext_vf2_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf2_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf2_i32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_tumu( @@ -669,7 +669,7 @@ vint32m1_t test_vsext_vf2_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf2_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf2_i32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_tumu( @@ -678,7 +678,7 @@ vint32m2_t 
test_vsext_vf2_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf2_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf2_i32m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_tumu( @@ -687,7 +687,7 @@ vint32m4_t test_vsext_vf2_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf2_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) { - return vsext_vf2_i32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tumu( @@ -696,7 +696,7 @@ vint32m8_t test_vsext_vf2_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { - return vsext_vf4_i64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tumu( @@ -705,7 +705,7 @@ vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf4_i64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tumu( @@ -714,7 +714,7 @@ vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf4_i64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m4_tumu(mask, maskedoff, op1, vl); } // 
CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tumu( @@ -723,7 +723,7 @@ vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf4_i64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tumu( @@ -732,7 +732,7 @@ vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) { - return vsext_vf2_i64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_tumu( @@ -741,7 +741,7 @@ vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf2_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) { - return vsext_vf2_i64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_tumu( @@ -750,7 +750,7 @@ vint64m2_t test_vsext_vf2_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf2_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) { - return vsext_vf2_i64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_tumu( @@ -759,7 +759,7 @@ vint64m4_t test_vsext_vf2_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf2_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) { - return vsext_vf2_i64m8_tumu(mask, maskedoff, op1, vl); 
+ return __riscv_vsext_vf2_i64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_mu( @@ -768,7 +768,7 @@ vint64m8_t test_vsext_vf2_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsext_vf2_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf2_i16mf4_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_mu( @@ -777,7 +777,7 @@ vint16mf4_t test_vsext_vf2_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsext_vf2_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf2_i16mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_mu( @@ -786,7 +786,7 @@ vint16mf2_t test_vsext_vf2_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsext_vf2_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf2_i16m1_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_mu( @@ -795,7 +795,7 @@ vint16m1_t test_vsext_vf2_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsext_vf2_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf2_i16m2_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_mu( @@ -804,7 +804,7 @@ vint16m2_t test_vsext_vf2_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsext_vf2_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) { - return 
vsext_vf2_i16m4_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_mu( @@ -813,7 +813,7 @@ vint16m4_t test_vsext_vf2_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsext_vf2_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) { - return vsext_vf2_i16m8_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i16m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_mu( @@ -822,7 +822,7 @@ vint16m8_t test_vsext_vf2_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf4_i32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_mu( @@ -831,7 +831,7 @@ vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf4_i32m1_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_mu( @@ -840,7 +840,7 @@ vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf4_i32m2_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_mu( @@ -849,7 +849,7 @@ vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, 
vint8m1_t op1, size_t vl) { - return vsext_vf4_i32m4_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_mu( @@ -858,7 +858,7 @@ vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return vsext_vf4_i32m8_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_mu( @@ -867,7 +867,7 @@ vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf8_i64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_mu( @@ -876,7 +876,7 @@ vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf8_i64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_mu( @@ -885,7 +885,7 @@ vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf8_i64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_mu( @@ -894,7 +894,7 @@ vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t 
mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf8_i64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf8_i64m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_mu( @@ -903,7 +903,7 @@ vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf2_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) { - return vsext_vf2_i32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_mu( @@ -912,7 +912,7 @@ vint32mf2_t test_vsext_vf2_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf2_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf2_i32m1_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_mu( @@ -921,7 +921,7 @@ vint32m1_t test_vsext_vf2_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf2_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf2_i32m2_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_mu( @@ -930,7 +930,7 @@ vint32m2_t test_vsext_vf2_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf2_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf2_i32m4_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_mu( @@ -939,7 +939,7 @@ vint32m4_t test_vsext_vf2_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m8_t test_vsext_vf2_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) { - return vsext_vf2_i32m8_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_mu( @@ -948,7 +948,7 @@ vint32m8_t test_vsext_vf2_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { - return vsext_vf4_i64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_mu( @@ -957,7 +957,7 @@ vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf4_i64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_mu( @@ -966,7 +966,7 @@ vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf4_i64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_mu( @@ -975,7 +975,7 @@ vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf4_i64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf4_i64m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_mu( @@ -984,7 +984,7 @@ vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint16m2 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf2_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) { - return vsext_vf2_i64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_mu( @@ -993,7 +993,7 @@ vint64m1_t test_vsext_vf2_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf2_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) { - return vsext_vf2_i64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_mu( @@ -1002,7 +1002,7 @@ vint64m2_t test_vsext_vf2_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf2_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) { - return vsext_vf2_i64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_mu( @@ -1011,6 +1011,6 @@ vint64m4_t test_vsext_vf2_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf2_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) { - return vsext_vf2_i64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vsext_vf2_i64m8_mu(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslide1down.c index 4585f27e5a50..8cb70b71a2d4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslide1down.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vslide1down_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf8_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8mf8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1down_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf4_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8mf4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_tu( @@ -30,7 +30,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1down_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_tu( @@ -39,7 +39,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1down_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_tu( @@ -48,7 +48,7 @@ vint8m1_t test_vslide1down_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1down_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_tu( @@ -57,7 +57,7 @@ vint8m2_t 
test_vslide1down_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1down_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_tu( @@ -66,7 +66,7 @@ vint8m4_t test_vslide1down_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_tu( @@ -75,7 +75,7 @@ vint8m8_t test_vslide1down_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf4_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16mf4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_tu( @@ -84,7 +84,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_tu( @@ -93,7 +93,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m1_tu(maskedoff, src, value, vl); + return 
__riscv_vslide1down_vx_i16m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_tu( @@ -102,7 +102,7 @@ vint16m1_t test_vslide1down_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_tu( @@ -111,7 +111,7 @@ vint16m2_t test_vslide1down_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1down_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_tu( @@ -120,7 +120,7 @@ vint16m4_t test_vslide1down_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tu( @@ -129,7 +129,7 @@ vint16m8_t test_vslide1down_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32mf2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_tu( @@ -138,7 +138,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vslide1down_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_tu( @@ -147,7 +147,7 @@ vint32m1_t test_vslide1down_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_tu( @@ -156,7 +156,7 @@ vint32m2_t test_vslide1down_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_tu( @@ -165,7 +165,7 @@ vint32m4_t test_vslide1down_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_tu( @@ -174,7 +174,7 @@ vint32m8_t test_vslide1down_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_tu( @@ -183,7 
+183,7 @@ vint64m1_t test_vslide1down_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_tu( @@ -192,7 +192,7 @@ vint64m2_t test_vslide1down_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_tu( @@ -201,7 +201,7 @@ vint64m4_t test_vslide1down_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_tu( @@ -210,7 +210,7 @@ vint64m8_t test_vslide1down_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, in // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf8_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8mf8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_tu( @@ -219,7 +219,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf4_tu(maskedoff, src, 
value, vl); + return __riscv_vslide1down_vx_u8mf4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_tu( @@ -228,7 +228,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_tu( @@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_tu( @@ -246,7 +246,7 @@ vuint8m1_t test_vslide1down_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_tu( @@ -255,7 +255,7 @@ vuint8m2_t test_vslide1down_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_tu( @@ -264,7 +264,7 @@ vuint8m4_t test_vslide1down_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vslide1down_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_tu( @@ -273,7 +273,7 @@ vuint8m8_t test_vslide1down_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16mf4_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16mf4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_tu( @@ -282,7 +282,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16mf2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_tu( @@ -291,7 +291,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_tu( @@ -300,7 +300,7 @@ vuint16m1_t test_vslide1down_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: 
@test_vslide1down_vx_u16m4_tu( @@ -309,7 +309,7 @@ vuint16m2_t test_vslide1down_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_tu( @@ -318,7 +318,7 @@ vuint16m4_t test_vslide1down_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tu( @@ -327,7 +327,7 @@ vuint16m8_t test_vslide1down_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32mf2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_tu( @@ -336,7 +336,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_tu( @@ -345,7 +345,7 @@ vuint32m1_t test_vslide1down_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, 
size_t vl) { - return vslide1down_vx_u32m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_tu( @@ -354,7 +354,7 @@ vuint32m2_t test_vslide1down_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_tu( @@ -363,7 +363,7 @@ vuint32m4_t test_vslide1down_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_tu( @@ -372,7 +372,7 @@ vuint32m8_t test_vslide1down_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_tu( @@ -381,7 +381,7 @@ vuint64m1_t test_vslide1down_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1down_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_tu( @@ -390,7 +390,7 @@ vuint64m2_t test_vslide1down_vx_u64m2_tu(vuint64m2_t 
maskedoff, vuint64m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m4_t test_vslide1down_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t test_vslide1down_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1down_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8mf8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1down_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8mf4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_tum( @@ -426,7 +426,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1down_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return 
vslide1down_vx_i8mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_tum( @@ -435,7 +435,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1down_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_tum( @@ -444,7 +444,7 @@ vint8m1_t test_vslide1down_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1down_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_tum( @@ -453,7 +453,7 @@ vint8m2_t test_vslide1down_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1down_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_tum( @@ -462,7 +462,7 @@ vint8m4_t test_vslide1down_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_tum( @@ 
-471,7 +471,7 @@ vint8m8_t test_vslide1down_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16mf4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_tum( @@ -480,7 +480,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_tum( @@ -489,7 +489,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_tum( @@ -498,7 +498,7 @@ vint16m1_t test_vslide1down_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_tum( @@ -507,7 +507,7 @@ vint16m2_t test_vslide1down_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vslide1down_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_tum( @@ -516,7 +516,7 @@ vint16m4_t test_vslide1down_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tum( @@ -525,7 +525,7 @@ vint16m8_t test_vslide1down_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_tum( @@ -534,7 +534,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_tum( @@ -543,7 +543,7 @@ vint32m1_t test_vslide1down_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m2_tum(mask, 
maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_tum( @@ -552,7 +552,7 @@ vint32m2_t test_vslide1down_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_tum( @@ -561,7 +561,7 @@ vint32m4_t test_vslide1down_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_tum( @@ -570,7 +570,7 @@ vint32m8_t test_vslide1down_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_tum( @@ -579,7 +579,7 @@ vint64m1_t test_vslide1down_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_tum( @@ 
-588,7 +588,7 @@ vint64m2_t test_vslide1down_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_tum( @@ -597,7 +597,7 @@ vint64m4_t test_vslide1down_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_tum( @@ -606,7 +606,7 @@ vint64m8_t test_vslide1down_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8mf8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_tum( @@ -615,7 +615,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8mf4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_tum( @@ -624,7 +624,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vslide1down_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_tum( @@ -633,7 +633,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_tum( @@ -642,7 +642,7 @@ vuint8m1_t test_vslide1down_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_tum( @@ -651,7 +651,7 @@ vuint8m2_t test_vslide1down_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_tum( @@ -660,7 +660,7 @@ vuint8m4_t test_vslide1down_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1down_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m8_tum(mask, maskedoff, src, value, vl); 
+ return __riscv_vslide1down_vx_u8m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_tum( @@ -669,7 +669,7 @@ vuint8m8_t test_vslide1down_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16mf4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16mf4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_tum( @@ -678,7 +678,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_tum( @@ -687,7 +687,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_tum( @@ -696,7 +696,7 @@ vuint16m1_t test_vslide1down_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_tum( @@ 
-705,7 +705,7 @@ vuint16m2_t test_vslide1down_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_tum( @@ -714,7 +714,7 @@ vuint16m4_t test_vslide1down_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tum( @@ -723,7 +723,7 @@ vuint16m8_t test_vslide1down_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_tum( @@ -732,7 +732,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_tum( @@ -741,7 +741,7 @@ vuint32m1_t test_vslide1down_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vslide1down_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_tum( @@ -750,7 +750,7 @@ vuint32m2_t test_vslide1down_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_tum( @@ -759,7 +759,7 @@ vuint32m4_t test_vslide1down_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_tum( @@ -768,7 +768,7 @@ vuint32m8_t test_vslide1down_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_tum( @@ -777,7 +777,7 @@ vuint64m1_t test_vslide1down_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1down_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return 
vslide1down_vx_u64m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_tum( @@ -786,7 +786,7 @@ vuint64m2_t test_vslide1down_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m4_t test_vslide1down_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vslide1down_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1down_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8mf8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1down_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8mf4_tumu(mask, maskedoff, src, value, vl); } // 
CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_tumu( @@ -822,7 +822,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1down_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_tumu( @@ -831,7 +831,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1down_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_tumu( @@ -840,7 +840,7 @@ vint8m1_t test_vslide1down_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1down_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_tumu( @@ -849,7 +849,7 @@ vint8m2_t test_vslide1down_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1down_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_tumu( @@ -858,7 +858,7 @@ vint8m4_t test_vslide1down_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_tumu( @@ -867,7 +867,7 @@ vint8m8_t test_vslide1down_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16mf4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_tumu( @@ -876,7 +876,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_tumu( @@ -885,7 +885,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_tumu( @@ -894,7 +894,7 @@ vint16m1_t test_vslide1down_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - 
return vslide1down_vx_i16m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_tumu( @@ -903,7 +903,7 @@ vint16m2_t test_vslide1down_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1down_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_tumu( @@ -912,7 +912,7 @@ vint16m4_t test_vslide1down_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_tumu( @@ -921,7 +921,7 @@ vint16m8_t test_vslide1down_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_tumu( @@ -930,7 +930,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m1_tumu(mask, maskedoff, src, value, 
vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_tumu( @@ -939,7 +939,7 @@ vint32m1_t test_vslide1down_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_tumu( @@ -948,7 +948,7 @@ vint32m2_t test_vslide1down_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_tumu( @@ -957,7 +957,7 @@ vint32m4_t test_vslide1down_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_tumu( @@ -966,7 +966,7 @@ vint32m8_t test_vslide1down_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_tumu( @@ -975,7 +975,7 @@ vint64m1_t test_vslide1down_vx_i64m1_tumu(vbool64_t mask, vint64m1_t 
maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_tumu( @@ -984,7 +984,7 @@ vint64m2_t test_vslide1down_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_tumu( @@ -993,7 +993,7 @@ vint64m4_t test_vslide1down_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_tumu( @@ -1002,7 +1002,7 @@ vint64m8_t test_vslide1down_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8mf8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_tumu( @@ -1011,7 +1011,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t 
src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8mf4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_tumu( @@ -1020,7 +1020,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_tumu( @@ -1029,7 +1029,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_tumu( @@ -1038,7 +1038,7 @@ vuint8m1_t test_vslide1down_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_tumu( @@ -1047,7 +1047,7 @@ vuint8m2_t test_vslide1down_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m4_tumu(mask, 
maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_tumu( @@ -1056,7 +1056,7 @@ vuint8m4_t test_vslide1down_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1down_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_tumu( @@ -1065,7 +1065,7 @@ vuint8m8_t test_vslide1down_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16mf4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16mf4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_tumu( @@ -1074,7 +1074,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_tumu( @@ -1083,7 +1083,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_tumu( @@ -1092,7 +1092,7 @@ vuint16m1_t 
test_vslide1down_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_tumu( @@ -1101,7 +1101,7 @@ vuint16m2_t test_vslide1down_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_tumu( @@ -1110,7 +1110,7 @@ vuint16m4_t test_vslide1down_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_tumu( @@ -1119,7 +1119,7 @@ vuint16m8_t test_vslide1down_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_tumu( @@ -1128,7 +1128,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vslide1down_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_tumu( @@ -1137,7 +1137,7 @@ vuint32m1_t test_vslide1down_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_tumu( @@ -1146,7 +1146,7 @@ vuint32m2_t test_vslide1down_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_tumu( @@ -1155,7 +1155,7 @@ vuint32m4_t test_vslide1down_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_tumu( @@ -1164,7 +1164,7 @@ vuint32m8_t test_vslide1down_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return 
vslide1down_vx_u64m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_tumu( @@ -1173,7 +1173,7 @@ vuint64m1_t test_vslide1down_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1down_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_tumu( @@ -1182,7 +1182,7 @@ vuint64m2_t test_vslide1down_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m4_t test_vslide1down_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t test_vslide1down_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1down_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8mf8_mu(mask, maskedoff, src, value, vl); 
} // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1down_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8mf4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_mu( @@ -1218,7 +1218,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1down_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_mu( @@ -1227,7 +1227,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1down_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_mu( @@ -1236,7 +1236,7 @@ vint8m1_t test_vslide1down_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1down_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_mu( @@ -1245,7 +1245,7 @@ vint8m2_t test_vslide1down_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m4_t test_vslide1down_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_mu( @@ -1254,7 +1254,7 @@ vint8m4_t test_vslide1down_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return vslide1down_vx_i8m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i8m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_mu( @@ -1263,7 +1263,7 @@ vint8m8_t test_vslide1down_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16mf4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_mu( @@ -1272,7 +1272,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_mu( @@ -1281,7 +1281,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m1_mu(mask, maskedoff, 
src, value, vl); + return __riscv_vslide1down_vx_i16m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_mu( @@ -1290,7 +1290,7 @@ vint16m1_t test_vslide1down_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_mu( @@ -1299,7 +1299,7 @@ vint16m2_t test_vslide1down_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1down_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_mu( @@ -1308,7 +1308,7 @@ vint16m4_t test_vslide1down_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1down_vx_i16m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i16m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_mu( @@ -1317,7 +1317,7 @@ vint16m8_t test_vslide1down_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_mu( @@ -1326,7 +1326,7 @@ 
vint32mf2_t test_vslide1down_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_mu( @@ -1335,7 +1335,7 @@ vint32m1_t test_vslide1down_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_mu( @@ -1344,7 +1344,7 @@ vint32m2_t test_vslide1down_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_mu( @@ -1353,7 +1353,7 @@ vint32m4_t test_vslide1down_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1down_vx_i32m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i32m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_mu( @@ -1362,7 +1362,7 @@ vint32m8_t test_vslide1down_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1_mu(vbool64_t mask, 
vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_mu( @@ -1371,7 +1371,7 @@ vint64m1_t test_vslide1down_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_mu( @@ -1380,7 +1380,7 @@ vint64m2_t test_vslide1down_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_mu( @@ -1389,7 +1389,7 @@ vint64m4_t test_vslide1down_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1down_vx_i64m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_i64m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_mu( @@ -1398,7 +1398,7 @@ vint64m8_t test_vslide1down_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf8_mu(mask, maskedoff, src, value, vl); + return 
__riscv_vslide1down_vx_u8mf8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_mu( @@ -1407,7 +1407,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8mf4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_mu( @@ -1416,7 +1416,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_mu( @@ -1425,7 +1425,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_mu( @@ -1434,7 +1434,7 @@ vuint8m1_t test_vslide1down_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_mu( @@ -1443,7 +1443,7 @@ vuint8m2_t 
test_vslide1down_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_mu( @@ -1452,7 +1452,7 @@ vuint8m4_t test_vslide1down_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1down_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1down_vx_u8m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u8m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_mu( @@ -1461,7 +1461,7 @@ vuint8m8_t test_vslide1down_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16mf4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16mf4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_mu( @@ -1470,7 +1470,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_mu( @@ -1479,7 +1479,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1_mu(vbool16_t 
mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_mu( @@ -1488,7 +1488,7 @@ vuint16m1_t test_vslide1down_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_mu( @@ -1497,7 +1497,7 @@ vuint16m2_t test_vslide1down_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_mu( @@ -1506,7 +1506,7 @@ vuint16m4_t test_vslide1down_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1down_vx_u16m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u16m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_mu( @@ -1515,7 +1515,7 @@ vuint16m8_t test_vslide1down_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32mf2_mu(mask, maskedoff, src, value, vl); + return 
__riscv_vslide1down_vx_u32mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_mu( @@ -1524,7 +1524,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_mu( @@ -1533,7 +1533,7 @@ vuint32m1_t test_vslide1down_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_mu( @@ -1542,7 +1542,7 @@ vuint32m2_t test_vslide1down_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_mu( @@ -1551,7 +1551,7 @@ vuint32m4_t test_vslide1down_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1down_vx_u32m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u32m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_mu( @@ -1560,7 +1560,7 @@ vuint32m8_t 
test_vslide1down_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_mu( @@ -1569,7 +1569,7 @@ vuint64m1_t test_vslide1down_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1down_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_mu( @@ -1578,7 +1578,7 @@ vuint64m2_t test_vslide1down_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m4_t test_vslide1down_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1down_vx_u64m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1down_vx_u64m8_mu(mask, maskedoff, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslide1up.c index ada227889796..9d6eb0515b54 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslide1up.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1up_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf8_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8mf8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf4_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8mf4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_tu( @@ -30,7 +30,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf2_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_tu( @@ -39,7 +39,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_tu( @@ -48,7 +48,7 @@ vint8m1_t test_vslide1up_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, int8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - 
return vslide1up_vx_i8m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_tu( @@ -57,7 +57,7 @@ vint8m2_t test_vslide1up_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, int8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_tu( @@ -66,7 +66,7 @@ vint8m4_t test_vslide1up_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, int8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_tu( @@ -75,7 +75,7 @@ vint8m8_t test_vslide1up_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, int8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf4_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16mf4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_tu( @@ -84,7 +84,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf2_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_tu( @@ -93,7 +93,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vslide1up_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_tu( @@ -102,7 +102,7 @@ vint16m1_t test_vslide1up_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_tu( @@ -111,7 +111,7 @@ vint16m2_t test_vslide1up_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_tu( @@ -120,7 +120,7 @@ vint16m4_t test_vslide1up_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tu( @@ -129,7 +129,7 @@ vint16m8_t test_vslide1up_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32mf2_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_tu( @@ -138,7 +138,7 @@ vint32mf2_t 
test_vslide1up_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_tu( @@ -147,7 +147,7 @@ vint32m1_t test_vslide1up_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_tu( @@ -156,7 +156,7 @@ vint32m2_t test_vslide1up_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_tu( @@ -165,7 +165,7 @@ vint32m4_t test_vslide1up_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_tu( @@ -174,7 +174,7 @@ vint32m8_t test_vslide1up_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m1_tu(maskedoff, src, 
value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_tu( @@ -183,7 +183,7 @@ vint64m1_t test_vslide1up_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, int6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_tu( @@ -192,7 +192,7 @@ vint64m2_t test_vslide1up_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, int6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1up_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_tu( @@ -201,7 +201,7 @@ vint64m4_t test_vslide1up_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, int6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_tu( @@ -210,7 +210,7 @@ vint64m8_t test_vslide1up_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, int6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf8_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8mf8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_tu( @@ -219,7 +219,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return 
vslide1up_vx_u8mf4_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8mf4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_tu( @@ -228,7 +228,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf2_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_tu( @@ -237,7 +237,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_tu( @@ -246,7 +246,7 @@ vuint8m1_t test_vslide1up_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_tu( @@ -255,7 +255,7 @@ vuint8m2_t test_vslide1up_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_tu( @@ -264,7 +264,7 @@ vuint8m4_t test_vslide1up_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vslide1up_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_tu( @@ -273,7 +273,7 @@ vuint8m8_t test_vslide1up_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1up_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16mf4_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16mf4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_tu( @@ -282,7 +282,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1up_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16mf2_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_tu( @@ -291,7 +291,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_tu( @@ -300,7 +300,7 @@ vuint16m1_t test_vslide1up_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_tu( @@ -309,7 +309,7 @@ 
vuint16m2_t test_vslide1up_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_tu( @@ -318,7 +318,7 @@ vuint16m4_t test_vslide1up_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tu( @@ -327,7 +327,7 @@ vuint16m8_t test_vslide1up_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32mf2_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32mf2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_tu( @@ -336,7 +336,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_tu( @@ -345,7 +345,7 @@ vuint32m1_t test_vslide1up_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m2_tu(maskedoff, src, value, vl); + return 
__riscv_vslide1up_vx_u32m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_tu( @@ -354,7 +354,7 @@ vuint32m2_t test_vslide1up_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_tu( @@ -363,7 +363,7 @@ vuint32m4_t test_vslide1up_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_tu( @@ -372,7 +372,7 @@ vuint32m8_t test_vslide1up_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m1_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m1_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_tu( @@ -381,7 +381,7 @@ vuint64m1_t test_vslide1up_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m2_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m2_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_tu( @@ -390,7 +390,7 @@ vuint64m2_t test_vslide1up_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1up_vx_u64m4_tu(vuint64m4_t 
maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m4_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m4_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m4_t test_vslide1up_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m8_tu(maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m8_tu(maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t test_vslide1up_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, u // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1up_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8mf8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8mf4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_tum( @@ -426,7 +426,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: 
@test_vslide1up_vx_i8m1_tum( @@ -435,7 +435,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_tum( @@ -444,7 +444,7 @@ vint8m1_t test_vslide1up_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_tum( @@ -453,7 +453,7 @@ vint8m2_t test_vslide1up_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_tum( @@ -462,7 +462,7 @@ vint8m4_t test_vslide1up_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_tum( @@ -471,7 +471,7 @@ vint8m8_t test_vslide1up_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4_tum(vbool64_t mask, 
vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16mf4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_tum( @@ -480,7 +480,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_tum( @@ -489,7 +489,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_tum( @@ -498,7 +498,7 @@ vint16m1_t test_vslide1up_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_tum( @@ -507,7 +507,7 @@ vint16m2_t test_vslide1up_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m4_tum(mask, maskedoff, 
src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_tum( @@ -516,7 +516,7 @@ vint16m4_t test_vslide1up_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tum( @@ -525,7 +525,7 @@ vint16m8_t test_vslide1up_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_tum( @@ -534,7 +534,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_tum( @@ -543,7 +543,7 @@ vint32m1_t test_vslide1up_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_tum( @@ -552,7 +552,7 @@ vint32m2_t test_vslide1up_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vin // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_tum( @@ -561,7 +561,7 @@ vint32m4_t test_vslide1up_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_tum( @@ -570,7 +570,7 @@ vint32m8_t test_vslide1up_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_tum( @@ -579,7 +579,7 @@ vint64m1_t test_vslide1up_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_tum( @@ -588,7 +588,7 @@ vint64m2_t test_vslide1up_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1up_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m4_tum(mask, maskedoff, src, value, 
vl); + return __riscv_vslide1up_vx_i64m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_tum( @@ -597,7 +597,7 @@ vint64m4_t test_vslide1up_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_tum( @@ -606,7 +606,7 @@ vint64m8_t test_vslide1up_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8mf8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_tum( @@ -615,7 +615,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8mf4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_tum( @@ -624,7 +624,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_tum( @@ -633,7 +633,7 @@ vuint8mf2_t 
test_vslide1up_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_tum( @@ -642,7 +642,7 @@ vuint8m1_t test_vslide1up_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_tum( @@ -651,7 +651,7 @@ vuint8m2_t test_vslide1up_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_tum( @@ -660,7 +660,7 @@ vuint8m4_t test_vslide1up_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_tum( @@ -669,7 +669,7 @@ vuint8m8_t test_vslide1up_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1up_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t 
value, size_t vl) { - return vslide1up_vx_u16mf4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16mf4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_tum( @@ -678,7 +678,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1up_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_tum( @@ -687,7 +687,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_tum( @@ -696,7 +696,7 @@ vuint16m1_t test_vslide1up_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_tum( @@ -705,7 +705,7 @@ vuint16m2_t test_vslide1up_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m4_tum(mask, maskedoff, src, value, vl); } // 
CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_tum( @@ -714,7 +714,7 @@ vuint16m4_t test_vslide1up_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tum( @@ -723,7 +723,7 @@ vuint16m8_t test_vslide1up_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1up_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32mf2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32mf2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_tum( @@ -732,7 +732,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_tum( @@ -741,7 +741,7 @@ vuint32m1_t test_vslide1up_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_tum( @@ -750,7 +750,7 @@ vuint32m2_t test_vslide1up_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, v // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m4_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_tum( @@ -759,7 +759,7 @@ vuint32m4_t test_vslide1up_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_tum( @@ -768,7 +768,7 @@ vuint32m8_t test_vslide1up_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m1_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m1_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_tum( @@ -777,7 +777,7 @@ vuint64m1_t test_vslide1up_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m2_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m2_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_tum( @@ -786,7 +786,7 @@ vuint64m2_t test_vslide1up_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1up_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m4_tum(mask, 
maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m4_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m4_t test_vslide1up_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m8_tum(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m8_tum(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vslide1up_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1up_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8mf8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8mf4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_tumu( @@ -822,7 +822,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_tumu( @@ -831,7 +831,7 @@ 
vint8mf2_t test_vslide1up_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_tumu( @@ -840,7 +840,7 @@ vint8m1_t test_vslide1up_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_tumu( @@ -849,7 +849,7 @@ vint8m2_t test_vslide1up_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_tumu( @@ -858,7 +858,7 @@ vint8m4_t test_vslide1up_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_tumu( @@ -867,7 +867,7 @@ vint8m8_t test_vslide1up_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t 
src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16mf4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_tumu( @@ -876,7 +876,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_tumu( @@ -885,7 +885,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_tumu( @@ -894,7 +894,7 @@ vint16m1_t test_vslide1up_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_tumu( @@ -903,7 +903,7 @@ vint16m2_t test_vslide1up_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m4_tumu(mask, maskedoff, src, value, 
vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_tumu( @@ -912,7 +912,7 @@ vint16m4_t test_vslide1up_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_tumu( @@ -921,7 +921,7 @@ vint16m8_t test_vslide1up_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_tumu( @@ -930,7 +930,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_tumu( @@ -939,7 +939,7 @@ vint32m1_t test_vslide1up_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_tumu( @@ -948,7 +948,7 @@ vint32m2_t test_vslide1up_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vi // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_tumu( @@ -957,7 +957,7 @@ vint32m4_t test_vslide1up_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_tumu( @@ -966,7 +966,7 @@ vint32m8_t test_vslide1up_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_tumu( @@ -975,7 +975,7 @@ vint64m1_t test_vslide1up_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_tumu( @@ -984,7 +984,7 @@ vint64m2_t test_vslide1up_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1up_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return 
vslide1up_vx_i64m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_tumu( @@ -993,7 +993,7 @@ vint64m4_t test_vslide1up_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_tumu( @@ -1002,7 +1002,7 @@ vint64m8_t test_vslide1up_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8mf8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_tumu( @@ -1011,7 +1011,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8mf4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_tumu( @@ -1020,7 +1020,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: 
@test_vslide1up_vx_u8m1_tumu( @@ -1029,7 +1029,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_tumu( @@ -1038,7 +1038,7 @@ vuint8m1_t test_vslide1up_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_tumu( @@ -1047,7 +1047,7 @@ vuint8m2_t test_vslide1up_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_tumu( @@ -1056,7 +1056,7 @@ vuint8m4_t test_vslide1up_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_tumu( @@ -1065,7 +1065,7 @@ vuint8m8_t test_vslide1up_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vslide1up_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16mf4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16mf4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_tumu( @@ -1074,7 +1074,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1up_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_tumu( @@ -1083,7 +1083,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_tumu( @@ -1092,7 +1092,7 @@ vuint16m1_t test_vslide1up_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_tumu( @@ -1101,7 +1101,7 @@ vuint16m2_t test_vslide1up_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return 
vslide1up_vx_u16m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_tumu( @@ -1110,7 +1110,7 @@ vuint16m4_t test_vslide1up_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_tumu( @@ -1119,7 +1119,7 @@ vuint16m8_t test_vslide1up_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32mf2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32mf2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_tumu( @@ -1128,7 +1128,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_tumu( @@ -1137,7 +1137,7 @@ vuint32m1_t test_vslide1up_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m2_tumu(mask, maskedoff, src, value, vl); } // 
CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_tumu( @@ -1146,7 +1146,7 @@ vuint32m2_t test_vslide1up_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_tumu( @@ -1155,7 +1155,7 @@ vuint32m4_t test_vslide1up_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_tumu( @@ -1164,7 +1164,7 @@ vuint32m8_t test_vslide1up_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m1_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m1_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_tumu( @@ -1173,7 +1173,7 @@ vuint64m1_t test_vslide1up_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m2_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m2_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_tumu( @@ -1182,7 +1182,7 @@ vuint64m2_t test_vslide1up_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1up_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m4_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m4_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m4_t test_vslide1up_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m8_tumu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m8_tumu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t test_vslide1up_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1up_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8mf8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8mf4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8mf4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_mu( @@ -1218,7 +1218,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return 
vslide1up_vx_i8mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_mu( @@ -1227,7 +1227,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_mu( @@ -1236,7 +1236,7 @@ vint8m1_t test_vslide1up_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_mu( @@ -1245,7 +1245,7 @@ vint8m2_t test_vslide1up_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_mu( @@ -1254,7 +1254,7 @@ vint8m4_t test_vslide1up_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return vslide1up_vx_i8m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i8m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_mu( @@ -1263,7 +1263,7 @@ vint8m8_t 
test_vslide1up_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16mf4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_mu( @@ -1272,7 +1272,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_mu( @@ -1281,7 +1281,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_mu( @@ -1290,7 +1290,7 @@ vint16m1_t test_vslide1up_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_mu( @@ -1299,7 +1299,7 @@ vint16m2_t test_vslide1up_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, 
vint16m4_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_mu( @@ -1308,7 +1308,7 @@ vint16m4_t test_vslide1up_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1up_vx_i16m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i16m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_mu( @@ -1317,7 +1317,7 @@ vint16m8_t test_vslide1up_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_mu( @@ -1326,7 +1326,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_mu( @@ -1335,7 +1335,7 @@ vint32m1_t test_vslide1up_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m2_mu(mask, maskedoff, src, value, vl); } // 
CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_mu( @@ -1344,7 +1344,7 @@ vint32m2_t test_vslide1up_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_mu( @@ -1353,7 +1353,7 @@ vint32m4_t test_vslide1up_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1up_vx_i32m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i32m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_mu( @@ -1362,7 +1362,7 @@ vint32m8_t test_vslide1up_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_mu( @@ -1371,7 +1371,7 @@ vint64m1_t test_vslide1up_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_mu( @@ -1380,7 +1380,7 @@ vint64m2_t test_vslide1up_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vslide1up_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_mu( @@ -1389,7 +1389,7 @@ vint64m4_t test_vslide1up_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1up_vx_i64m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_i64m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_mu( @@ -1398,7 +1398,7 @@ vint64m8_t test_vslide1up_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8mf8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_mu( @@ -1407,7 +1407,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8mf4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_mu( @@ -1416,7 +1416,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8mf2_mu(mask, maskedoff, src, value, vl); + return 
__riscv_vslide1up_vx_u8mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_mu( @@ -1425,7 +1425,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_mu( @@ -1434,7 +1434,7 @@ vuint8m1_t test_vslide1up_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_mu( @@ -1443,7 +1443,7 @@ vuint8m2_t test_vslide1up_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_mu( @@ -1452,7 +1452,7 @@ vuint8m4_t test_vslide1up_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1up_vx_u8m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u8m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_mu( @@ -1461,7 +1461,7 @@ vuint8m8_t test_vslide1up_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1up_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16mf4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16mf4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_mu( @@ -1470,7 +1470,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1up_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_mu( @@ -1479,7 +1479,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_mu( @@ -1488,7 +1488,7 @@ vuint16m1_t test_vslide1up_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_mu( @@ -1497,7 +1497,7 @@ vuint16m2_t test_vslide1up_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return 
vslide1up_vx_u16m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_mu( @@ -1506,7 +1506,7 @@ vuint16m4_t test_vslide1up_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1up_vx_u16m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u16m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_mu( @@ -1515,7 +1515,7 @@ vuint16m8_t test_vslide1up_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1up_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32mf2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32mf2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_mu( @@ -1524,7 +1524,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_mu( @@ -1533,7 +1533,7 @@ vuint32m1_t test_vslide1up_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_mu( 
@@ -1542,7 +1542,7 @@ vuint32m2_t test_vslide1up_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_mu( @@ -1551,7 +1551,7 @@ vuint32m4_t test_vslide1up_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1up_vx_u32m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u32m8_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_mu( @@ -1560,7 +1560,7 @@ vuint32m8_t test_vslide1up_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m1_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m1_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_mu( @@ -1569,7 +1569,7 @@ vuint64m1_t test_vslide1up_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m2_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m2_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_mu( @@ -1578,7 +1578,7 @@ vuint64m2_t test_vslide1up_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1up_vx_u64m4_mu(vbool16_t 
mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m4_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m4_mu(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m4_t test_vslide1up_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return vslide1up_vx_u64m8_mu(mask, maskedoff, src, value, vl); + return __riscv_vslide1up_vx_u64m8_mu(mask, maskedoff, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslidedown.c index 80bcc68a2e81..0d9bcdcf500f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslidedown.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16mf4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16mf4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslidedown_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16mf2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16mf2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_ // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m1_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m1_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m8_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m8_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vslidedown_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32mf2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32mf2_tu(maskedoff, src, offset, vl); } // 
CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m1_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m1_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m8_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m8_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, size_t 
offset, size_t vl) { - return vslidedown_vx_f64m1_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m1_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m8_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m8_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tu( @@ -148,7 +148,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf8_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8mf8_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tu( @@ -157,7 +157,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8_tu(vint8mf8_t 
maskedoff, vint8mf8_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8mf4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tu( @@ -166,7 +166,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8mf2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tu( @@ -175,7 +175,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m1_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m1_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tu( @@ -184,7 +184,7 @@ vint8m1_t test_vslidedown_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tu( @@ -193,7 +193,7 @@ vint8m2_t test_vslidedown_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m4_tu(maskedoff, src, offset, vl); } // 
CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tu( @@ -202,7 +202,7 @@ vint8m4_t test_vslidedown_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m8_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m8_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tu( @@ -211,7 +211,7 @@ vint8m8_t test_vslidedown_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16mf4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tu( @@ -220,7 +220,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16mf2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tu( @@ -229,7 +229,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m1_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m1_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tu( @@ -238,7 +238,7 @@ vint16m1_t test_vslidedown_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { - 
return vslidedown_vx_i16m2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tu( @@ -247,7 +247,7 @@ vint16m2_t test_vslidedown_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tu( @@ -256,7 +256,7 @@ vint16m4_t test_vslidedown_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m8_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m8_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tu( @@ -265,7 +265,7 @@ vint16m8_t test_vslidedown_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32mf2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32mf2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tu( @@ -274,7 +274,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m1_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m1_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tu( @@ -283,7 +283,7 @@ vint32m1_t test_vslidedown_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, siz // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tu( @@ -292,7 +292,7 @@ vint32m2_t test_vslidedown_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tu( @@ -301,7 +301,7 @@ vint32m4_t test_vslidedown_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m8_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m8_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tu( @@ -310,7 +310,7 @@ vint32m8_t test_vslidedown_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m1_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m1_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tu( @@ -319,7 +319,7 @@ vint64m1_t test_vslidedown_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslidedown_vx_i64m4_tu( @@ -328,7 +328,7 @@ vint64m2_t test_vslidedown_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tu( @@ -337,7 +337,7 @@ vint64m4_t test_vslidedown_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m8_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m8_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tu( @@ -346,7 +346,7 @@ vint64m8_t test_vslidedown_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf8_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8mf8_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tu( @@ -355,7 +355,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8mf4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tu( @@ -364,7 +364,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { - return 
vslidedown_vx_u8mf2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8mf2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tu( @@ -373,7 +373,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m1_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m1_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tu( @@ -382,7 +382,7 @@ vuint8m1_t test_vslidedown_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tu( @@ -391,7 +391,7 @@ vuint8m2_t test_vslidedown_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tu( @@ -400,7 +400,7 @@ vuint8m4_t test_vslidedown_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m8_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m8_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tu( @@ -409,7 +409,7 @@ vuint8m8_t test_vslidedown_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf4_t test_vslidedown_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16mf4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16mf4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tu( @@ -418,7 +418,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16mf2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16mf2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tu( @@ -427,7 +427,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m1_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m1_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tu( @@ -436,7 +436,7 @@ vuint16m1_t test_vslidedown_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tu( @@ -445,7 +445,7 @@ vuint16m2_t test_vslidedown_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslidedown_vx_u16m8_tu( @@ -454,7 +454,7 @@ vuint16m4_t test_vslidedown_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m8_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m8_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tu( @@ -463,7 +463,7 @@ vuint16m8_t test_vslidedown_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32mf2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32mf2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tu( @@ -472,7 +472,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m1_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m1_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tu( @@ -481,7 +481,7 @@ vuint32m1_t test_vslidedown_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tu( @@ -490,7 +490,7 @@ vuint32m2_t test_vslidedown_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return 
vslidedown_vx_u32m4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tu( @@ -499,7 +499,7 @@ vuint32m4_t test_vslidedown_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m8_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m8_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tu( @@ -508,7 +508,7 @@ vuint32m8_t test_vslidedown_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m1_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m1_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tu( @@ -517,7 +517,7 @@ vuint64m1_t test_vslidedown_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m2_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m2_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tu( @@ -526,7 +526,7 @@ vuint64m2_t test_vslidedown_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m4_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m4_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tu( @@ -535,7 +535,7 @@ vuint64m4_t test_vslidedown_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m8_tu(maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m8_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tum( @@ -544,7 +544,7 @@ vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16mf4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16mf4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tum( @@ -553,7 +553,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslidedown_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16mf2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16mf2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tum( @@ -562,7 +562,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m1_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m1_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tum( @@ -571,7 +571,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { - return 
vslidedown_vx_f16m2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tum( @@ -580,7 +580,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tum( @@ -589,7 +589,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m8_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m8_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tum( @@ -598,7 +598,7 @@ vfloat16m8_t test_vslidedown_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32mf2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32mf2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tum( @@ -607,7 +607,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m1_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m1_tum(mask, maskedoff, src, offset, vl); } // 
CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tum( @@ -616,7 +616,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tum( @@ -625,7 +625,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tum( @@ -634,7 +634,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m8_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m8_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tum( @@ -643,7 +643,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m1_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m1_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tum( @@ -652,7 +652,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tum( @@ -661,7 +661,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tum( @@ -670,7 +670,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m8_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m8_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tum( @@ -679,7 +679,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf8_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8mf8_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tum( @@ -688,7 +688,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t 
vl) { - return vslidedown_vx_i8mf4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8mf4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tum( @@ -697,7 +697,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8mf2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tum( @@ -706,7 +706,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m1_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m1_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tum( @@ -715,7 +715,7 @@ vint8m1_t test_vslidedown_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tum( @@ -724,7 +724,7 @@ vint8m2_t test_vslidedown_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslidedown_vx_i8m8_tum( @@ -733,7 +733,7 @@ vint8m4_t test_vslidedown_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m8_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m8_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tum( @@ -742,7 +742,7 @@ vint8m8_t test_vslidedown_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16mf4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tum( @@ -751,7 +751,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16mf2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tum( @@ -760,7 +760,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m1_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m1_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tum( @@ -769,7 +769,7 @@ vint16m1_t test_vslidedown_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint16m2_t test_vslidedown_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tum( @@ -778,7 +778,7 @@ vint16m2_t test_vslidedown_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tum( @@ -787,7 +787,7 @@ vint16m4_t test_vslidedown_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m8_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m8_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tum( @@ -796,7 +796,7 @@ vint16m8_t test_vslidedown_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32mf2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32mf2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tum( @@ -805,7 +805,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m1_tum(mask, 
maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m1_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tum( @@ -814,7 +814,7 @@ vint32m1_t test_vslidedown_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tum( @@ -823,7 +823,7 @@ vint32m2_t test_vslidedown_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tum( @@ -832,7 +832,7 @@ vint32m4_t test_vslidedown_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m8_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m8_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tum( @@ -841,7 +841,7 @@ vint32m8_t test_vslidedown_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m1_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m1_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tum( @@ -850,7 
+850,7 @@ vint64m1_t test_vslidedown_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tum( @@ -859,7 +859,7 @@ vint64m2_t test_vslidedown_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tum( @@ -868,7 +868,7 @@ vint64m4_t test_vslidedown_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m8_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m8_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tum( @@ -877,7 +877,7 @@ vint64m8_t test_vslidedown_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf8_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8mf8_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tum( @@ -886,7 +886,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vslidedown_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8mf4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tum( @@ -895,7 +895,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8mf2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tum( @@ -904,7 +904,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m1_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m1_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tum( @@ -913,7 +913,7 @@ vuint8m1_t test_vslidedown_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tum( @@ -922,7 +922,7 @@ vuint8m2_t test_vslidedown_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m4_tum(mask, maskedoff, src, offset, vl); + 
return __riscv_vslidedown_vx_u8m4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tum( @@ -931,7 +931,7 @@ vuint8m4_t test_vslidedown_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m8_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m8_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tum( @@ -940,7 +940,7 @@ vuint8m8_t test_vslidedown_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16mf4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16mf4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tum( @@ -949,7 +949,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16mf2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16mf2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tum( @@ -958,7 +958,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m1_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m1_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tum( @@ -967,7 +967,7 @@ 
vuint16m1_t test_vslidedown_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tum( @@ -976,7 +976,7 @@ vuint16m2_t test_vslidedown_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tum( @@ -985,7 +985,7 @@ vuint16m4_t test_vslidedown_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m8_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m8_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tum( @@ -994,7 +994,7 @@ vuint16m8_t test_vslidedown_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32mf2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32mf2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tum( @@ -1003,7 +1003,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vslidedown_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m1_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m1_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tum( @@ -1012,7 +1012,7 @@ vuint32m1_t test_vslidedown_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tum( @@ -1021,7 +1021,7 @@ vuint32m2_t test_vslidedown_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tum( @@ -1030,7 +1030,7 @@ vuint32m4_t test_vslidedown_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m8_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m8_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tum( @@ -1039,7 +1039,7 @@ vuint32m8_t test_vslidedown_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m1_tum(mask, 
maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m1_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tum( @@ -1048,7 +1048,7 @@ vuint64m1_t test_vslidedown_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m2_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m2_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tum( @@ -1057,7 +1057,7 @@ vuint64m2_t test_vslidedown_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m4_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m4_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tum( @@ -1066,7 +1066,7 @@ vuint64m4_t test_vslidedown_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m8_tum(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m8_tum(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tumu( @@ -1075,7 +1075,7 @@ vuint64m8_t test_vslidedown_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16mf4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16mf4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslidedown_vx_f16mf2_tumu( @@ -1084,7 +1084,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslidedown_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16mf2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16mf2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tumu( @@ -1093,7 +1093,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m1_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m1_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tumu( @@ -1102,7 +1102,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tumu( @@ -1111,7 +1111,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tumu( @@ -1120,7 +1120,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4_tumu(vbool4_t mask, 
vfloat16m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m8_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m8_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tumu( @@ -1129,7 +1129,7 @@ vfloat16m8_t test_vslidedown_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32mf2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32mf2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tumu( @@ -1138,7 +1138,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m1_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m1_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tumu( @@ -1147,7 +1147,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tumu( @@ -1156,7 +1156,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4_tumu(vbool8_t mask, 
vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tumu( @@ -1165,7 +1165,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m8_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m8_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tumu( @@ -1174,7 +1174,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m1_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m1_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tumu( @@ -1183,7 +1183,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tumu( @@ -1192,7 +1192,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m4_tumu(mask, maskedoff, 
src, offset, vl); + return __riscv_vslidedown_vx_f64m4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tumu( @@ -1201,7 +1201,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m8_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m8_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tumu( @@ -1210,7 +1210,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf8_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8mf8_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tumu( @@ -1219,7 +1219,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8mf4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tumu( @@ -1228,7 +1228,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8mf2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslidedown_vx_i8m1_tumu( @@ -1237,7 +1237,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m1_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m1_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tumu( @@ -1246,7 +1246,7 @@ vint8m1_t test_vslidedown_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tumu( @@ -1255,7 +1255,7 @@ vint8m2_t test_vslidedown_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tumu( @@ -1264,7 +1264,7 @@ vint8m4_t test_vslidedown_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m8_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m8_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tumu( @@ -1273,7 +1273,7 @@ vint8m8_t test_vslidedown_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf4_t test_vslidedown_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16mf4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tumu( @@ -1282,7 +1282,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16mf2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tumu( @@ -1291,7 +1291,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m1_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m1_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tumu( @@ -1300,7 +1300,7 @@ vint16m1_t test_vslidedown_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tumu( @@ -1309,7 +1309,7 @@ vint16m2_t test_vslidedown_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { - return 
vslidedown_vx_i16m4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tumu( @@ -1318,7 +1318,7 @@ vint16m4_t test_vslidedown_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m8_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m8_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tumu( @@ -1327,7 +1327,7 @@ vint16m8_t test_vslidedown_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32mf2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32mf2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tumu( @@ -1336,7 +1336,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m1_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m1_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tumu( @@ -1345,7 +1345,7 @@ vint32m1_t test_vslidedown_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m2_tumu(mask, maskedoff, src, offset, vl); } // 
CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tumu( @@ -1354,7 +1354,7 @@ vint32m2_t test_vslidedown_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tumu( @@ -1363,7 +1363,7 @@ vint32m4_t test_vslidedown_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m8_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m8_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tumu( @@ -1372,7 +1372,7 @@ vint32m8_t test_vslidedown_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m1_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m1_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tumu( @@ -1381,7 +1381,7 @@ vint64m1_t test_vslidedown_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tumu( @@ -1390,7 +1390,7 @@ vint64m2_t test_vslidedown_vx_i64m2_tumu(vbool32_t mask, vint64m2_t 
maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tumu( @@ -1399,7 +1399,7 @@ vint64m4_t test_vslidedown_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m8_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m8_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tumu( @@ -1408,7 +1408,7 @@ vint64m8_t test_vslidedown_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf8_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8mf8_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tumu( @@ -1417,7 +1417,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8mf4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tumu( @@ -1426,7 +1426,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t 
src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8mf2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tumu( @@ -1435,7 +1435,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m1_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m1_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tumu( @@ -1444,7 +1444,7 @@ vuint8m1_t test_vslidedown_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tumu( @@ -1453,7 +1453,7 @@ vuint8m2_t test_vslidedown_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tumu( @@ -1462,7 +1462,7 @@ vuint8m4_t test_vslidedown_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m8_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m8_tumu(mask, maskedoff, src, 
offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tumu( @@ -1471,7 +1471,7 @@ vuint8m8_t test_vslidedown_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16mf4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16mf4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tumu( @@ -1480,7 +1480,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16mf2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16mf2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tumu( @@ -1489,7 +1489,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m1_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m1_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tumu( @@ -1498,7 +1498,7 @@ vuint16m1_t test_vslidedown_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tumu( @@ -1507,7 +1507,7 @@ vuint16m2_t 
test_vslidedown_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tumu( @@ -1516,7 +1516,7 @@ vuint16m4_t test_vslidedown_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m8_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m8_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tumu( @@ -1525,7 +1525,7 @@ vuint16m8_t test_vslidedown_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32mf2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32mf2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tumu( @@ -1534,7 +1534,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m1_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m1_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tumu( @@ -1543,7 +1543,7 @@ vuint32m1_t test_vslidedown_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vslidedown_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tumu( @@ -1552,7 +1552,7 @@ vuint32m2_t test_vslidedown_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tumu( @@ -1561,7 +1561,7 @@ vuint32m4_t test_vslidedown_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m8_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m8_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tumu( @@ -1570,7 +1570,7 @@ vuint32m8_t test_vslidedown_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m1_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m1_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tumu( @@ -1579,7 +1579,7 @@ vuint64m1_t test_vslidedown_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { - return 
vslidedown_vx_u64m2_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m2_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tumu( @@ -1588,7 +1588,7 @@ vuint64m2_t test_vslidedown_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m4_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m4_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tumu( @@ -1597,7 +1597,7 @@ vuint64m4_t test_vslidedown_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m8_tumu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m8_tumu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_mu( @@ -1606,7 +1606,7 @@ vuint64m8_t test_vslidedown_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslidedown_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16mf4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16mf4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_mu( @@ -1615,7 +1615,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslidedown_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16mf2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16mf2_mu(mask, maskedoff, src, offset, 
vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_mu( @@ -1624,7 +1624,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslidedown_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m1_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m1_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_mu( @@ -1633,7 +1633,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslidedown_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_mu( @@ -1642,7 +1642,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslidedown_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_mu( @@ -1651,7 +1651,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslidedown_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f16m8_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f16m8_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_mu( @@ -1660,7 +1660,7 @@ vfloat16m8_t test_vslidedown_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32mf2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32mf2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_mu( @@ -1669,7 +1669,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m1_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m1_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_mu( @@ -1678,7 +1678,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_mu( @@ -1687,7 +1687,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f32m4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_mu( @@ -1696,7 +1696,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, 
size_t vl) { - return vslidedown_vx_f32m8_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f32m8_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_mu( @@ -1705,7 +1705,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m1_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m1_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_mu( @@ -1714,7 +1714,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_mu( @@ -1723,7 +1723,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_mu( @@ -1732,7 +1732,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_f64m8_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_f64m8_mu(mask, maskedoff, src, offset, vl); } 
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_mu( @@ -1741,7 +1741,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf8_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8mf8_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_mu( @@ -1750,7 +1750,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8mf4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_mu( @@ -1759,7 +1759,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8mf2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8mf2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_mu( @@ -1768,7 +1768,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m1_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m1_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_mu( @@ -1777,7 +1777,7 @@ vint8m1_t test_vslidedown_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m2_t test_vslidedown_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_mu( @@ -1786,7 +1786,7 @@ vint8m2_t test_vslidedown_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_mu( @@ -1795,7 +1795,7 @@ vint8m4_t test_vslidedown_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i8m8_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i8m8_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_mu( @@ -1804,7 +1804,7 @@ vint8m8_t test_vslidedown_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16mf4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_mu( @@ -1813,7 +1813,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16mf2_mu(mask, maskedoff, src, offset, vl); + 
return __riscv_vslidedown_vx_i16mf2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_mu( @@ -1822,7 +1822,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m1_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m1_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_mu( @@ -1831,7 +1831,7 @@ vint16m1_t test_vslidedown_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_mu( @@ -1840,7 +1840,7 @@ vint16m2_t test_vslidedown_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_mu( @@ -1849,7 +1849,7 @@ vint16m4_t test_vslidedown_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i16m8_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i16m8_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_mu( @@ -1858,7 +1858,7 @@ vint16m8_t 
test_vslidedown_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32mf2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32mf2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_mu( @@ -1867,7 +1867,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m1_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m1_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_mu( @@ -1876,7 +1876,7 @@ vint32m1_t test_vslidedown_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_mu( @@ -1885,7 +1885,7 @@ vint32m2_t test_vslidedown_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_mu( @@ -1894,7 +1894,7 @@ vint32m4_t test_vslidedown_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8_mu(vbool4_t mask, vint32m8_t 
maskedoff, vint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i32m8_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i32m8_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_mu( @@ -1903,7 +1903,7 @@ vint32m8_t test_vslidedown_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m1_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m1_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_mu( @@ -1912,7 +1912,7 @@ vint64m1_t test_vslidedown_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_mu( @@ -1921,7 +1921,7 @@ vint64m2_t test_vslidedown_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_mu( @@ -1930,7 +1930,7 @@ vint64m4_t test_vslidedown_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_i64m8_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_i64m8_mu(mask, maskedoff, 
src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_mu( @@ -1939,7 +1939,7 @@ vint64m8_t test_vslidedown_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf8_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8mf8_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_mu( @@ -1948,7 +1948,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8mf4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_mu( @@ -1957,7 +1957,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8mf2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8mf2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_mu( @@ -1966,7 +1966,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m1_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m1_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_mu( @@ -1975,7 +1975,7 @@ vuint8m1_t test_vslidedown_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_mu( @@ -1984,7 +1984,7 @@ vuint8m2_t test_vslidedown_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_mu( @@ -1993,7 +1993,7 @@ vuint8m4_t test_vslidedown_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m8_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u8m8_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_mu( @@ -2002,7 +2002,7 @@ vuint8m8_t test_vslidedown_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16mf4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16mf4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_mu( @@ -2011,7 +2011,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { - return 
vslidedown_vx_u16mf2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16mf2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_mu( @@ -2020,7 +2020,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m1_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m1_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_mu( @@ -2029,7 +2029,7 @@ vuint16m1_t test_vslidedown_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_mu( @@ -2038,7 +2038,7 @@ vuint16m2_t test_vslidedown_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_mu( @@ -2047,7 +2047,7 @@ vuint16m4_t test_vslidedown_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u16m8_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u16m8_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslidedown_vx_u32mf2_mu( @@ -2056,7 +2056,7 @@ vuint16m8_t test_vslidedown_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32mf2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32mf2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_mu( @@ -2065,7 +2065,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m1_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m1_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_mu( @@ -2074,7 +2074,7 @@ vuint32m1_t test_vslidedown_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_mu( @@ -2083,7 +2083,7 @@ vuint32m2_t test_vslidedown_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_mu( @@ -2092,7 +2092,7 @@ vuint32m4_t test_vslidedown_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vu // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u32m8_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u32m8_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_mu( @@ -2101,7 +2101,7 @@ vuint32m8_t test_vslidedown_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m1_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m1_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_mu( @@ -2110,7 +2110,7 @@ vuint64m1_t test_vslidedown_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m2_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m2_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_mu( @@ -2119,7 +2119,7 @@ vuint64m2_t test_vslidedown_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u64m4_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m4_mu(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_mu( @@ -2128,6 +2128,6 @@ vuint64m4_t test_vslidedown_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { - return 
vslidedown_vx_u64m8_mu(mask, maskedoff, src, offset, vl); + return __riscv_vslidedown_vx_u64m8_mu(mask, maskedoff, src, offset, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslideup.c index bd4c14ed099e..5a62f8174a05 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslideup.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4_tu(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_f16mf4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f16mf4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_tu( @@ -22,7 +22,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4_tu(vfloat16mf4_t dest, vfloat16mf4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2_tu(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_f16mf2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f16mf2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_tu( @@ -31,7 +31,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2_tu(vfloat16mf2_t dest, vfloat16mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1_tu(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m1_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f16m1_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_tu( @@ -40,7 +40,7 @@ vfloat16m1_t test_vslideup_vx_f16m1_tu(vfloat16m1_t dest, vfloat16m1_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2_tu(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m2_tu(dest, src, offset, vl); + return 
__riscv_vslideup_vx_f16m2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_tu( @@ -49,7 +49,7 @@ vfloat16m2_t test_vslideup_vx_f16m2_tu(vfloat16m2_t dest, vfloat16m2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4_tu(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f16m4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_tu( @@ -58,7 +58,7 @@ vfloat16m4_t test_vslideup_vx_f16m4_tu(vfloat16m4_t dest, vfloat16m4_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8_tu(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m8_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f16m8_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tu( @@ -67,7 +67,7 @@ vfloat16m8_t test_vslideup_vx_f16m8_tu(vfloat16m8_t dest, vfloat16m8_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_f32mf2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f32mf2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_tu( @@ -76,7 +76,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t dest, vfloat32mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_tu(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m1_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f32m1_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_tu( @@ -85,7 +85,7 @@ vfloat32m1_t test_vslideup_vx_f32m1_tu(vfloat32m1_t dest, vfloat32m1_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_tu(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return 
vslideup_vx_f32m2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f32m2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_tu( @@ -94,7 +94,7 @@ vfloat32m2_t test_vslideup_vx_f32m2_tu(vfloat32m2_t dest, vfloat32m2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_tu(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f32m4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_tu( @@ -103,7 +103,7 @@ vfloat32m4_t test_vslideup_vx_f32m4_tu(vfloat32m4_t dest, vfloat32m4_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_tu(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m8_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f32m8_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_tu( @@ -112,7 +112,7 @@ vfloat32m8_t test_vslideup_vx_f32m8_tu(vfloat32m8_t dest, vfloat32m8_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_tu(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m1_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f64m1_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_tu( @@ -121,7 +121,7 @@ vfloat64m1_t test_vslideup_vx_f64m1_tu(vfloat64m1_t dest, vfloat64m1_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_tu(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f64m2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_tu( @@ -130,7 +130,7 @@ vfloat64m2_t test_vslideup_vx_f64m2_tu(vfloat64m2_t dest, vfloat64m2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_tu(vfloat64m4_t dest, vfloat64m4_t 
src, size_t offset, size_t vl) { - return vslideup_vx_f64m4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f64m4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_tu( @@ -139,7 +139,7 @@ vfloat64m4_t test_vslideup_vx_f64m4_tu(vfloat64m4_t dest, vfloat64m4_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_tu(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m8_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_f64m8_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_tu( @@ -148,7 +148,7 @@ vfloat64m8_t test_vslideup_vx_f64m8_tu(vfloat64m8_t dest, vfloat64m8_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_tu(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf8_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf8_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_tu( @@ -157,7 +157,7 @@ vint8mf8_t test_vslideup_vx_i8mf8_tu(vint8mf8_t dest, vint8mf8_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_tu(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_tu( @@ -166,7 +166,7 @@ vint8mf4_t test_vslideup_vx_i8mf4_tu(vint8mf4_t dest, vint8mf4_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_tu(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_tu( @@ -175,7 +175,7 @@ vint8mf2_t test_vslideup_vx_i8mf2_tu(vint8mf2_t dest, vint8mf2_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_tu(vint8m1_t 
dest, vint8m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m1_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i8m1_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_tu( @@ -184,7 +184,7 @@ vint8m1_t test_vslideup_vx_i8m1_tu(vint8m1_t dest, vint8m1_t src, size_t offset, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_tu(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i8m2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_tu( @@ -193,7 +193,7 @@ vint8m2_t test_vslideup_vx_i8m2_tu(vint8m2_t dest, vint8m2_t src, size_t offset, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_tu(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i8m4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_tu( @@ -202,7 +202,7 @@ vint8m4_t test_vslideup_vx_i8m4_tu(vint8m4_t dest, vint8m4_t src, size_t offset, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_tu(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m8_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i8m8_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_tu( @@ -211,7 +211,7 @@ vint8m8_t test_vslideup_vx_i8m8_tu(vint8m8_t dest, vint8m8_t src, size_t offset, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_tu(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_i16mf4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i16mf4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_tu( @@ -220,7 +220,7 @@ vint16mf4_t test_vslideup_vx_i16mf4_tu(vint16mf4_t dest, vint16mf4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_tu(vint16mf2_t 
dest, vint16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i16mf2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i16mf2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_tu( @@ -229,7 +229,7 @@ vint16mf2_t test_vslideup_vx_i16mf2_tu(vint16mf2_t dest, vint16mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_tu(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m1_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i16m1_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_tu( @@ -238,7 +238,7 @@ vint16m1_t test_vslideup_vx_i16m1_tu(vint16m1_t dest, vint16m1_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_tu(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i16m2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_tu( @@ -247,7 +247,7 @@ vint16m2_t test_vslideup_vx_i16m2_tu(vint16m2_t dest, vint16m2_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_tu(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i16m4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_tu( @@ -256,7 +256,7 @@ vint16m4_t test_vslideup_vx_i16m4_tu(vint16m4_t dest, vint16m4_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_tu(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m8_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i16m8_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tu( @@ -265,7 +265,7 @@ vint16m8_t test_vslideup_vx_i16m8_tu(vint16m8_t dest, vint16m8_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vslideup_vx_i32mf2_tu(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i32mf2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i32mf2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_tu( @@ -274,7 +274,7 @@ vint32mf2_t test_vslideup_vx_i32mf2_tu(vint32mf2_t dest, vint32mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_tu(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m1_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i32m1_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_tu( @@ -283,7 +283,7 @@ vint32m1_t test_vslideup_vx_i32m1_tu(vint32m1_t dest, vint32m1_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_tu(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i32m2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_tu( @@ -292,7 +292,7 @@ vint32m2_t test_vslideup_vx_i32m2_tu(vint32m2_t dest, vint32m2_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_tu(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i32m4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_tu( @@ -301,7 +301,7 @@ vint32m4_t test_vslideup_vx_i32m4_tu(vint32m4_t dest, vint32m4_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_tu(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m8_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i32m8_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_tu( @@ -310,7 +310,7 @@ vint32m8_t test_vslideup_vx_i32m8_tu(vint32m8_t dest, vint32m8_t src, size_t off // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_tu(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m1_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i64m1_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_tu( @@ -319,7 +319,7 @@ vint64m1_t test_vslideup_vx_i64m1_tu(vint64m1_t dest, vint64m1_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_tu(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i64m2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_tu( @@ -328,7 +328,7 @@ vint64m2_t test_vslideup_vx_i64m2_tu(vint64m2_t dest, vint64m2_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_tu(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i64m4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_tu( @@ -337,7 +337,7 @@ vint64m4_t test_vslideup_vx_i64m4_tu(vint64m4_t dest, vint64m4_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_tu(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m8_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_i64m8_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_tu( @@ -346,7 +346,7 @@ vint64m8_t test_vslideup_vx_i64m8_tu(vint64m8_t dest, vint64m8_t src, size_t off // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_tu(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf8_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf8_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_tu( @@ -355,7 +355,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8_tu(vuint8mf8_t dest, vuint8mf8_t src, size_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_tu(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_tu( @@ -364,7 +364,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4_tu(vuint8mf4_t dest, vuint8mf4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_tu(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_tu( @@ -373,7 +373,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2_tu(vuint8mf2_t dest, vuint8mf2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_tu(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m1_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u8m1_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_tu( @@ -382,7 +382,7 @@ vuint8m1_t test_vslideup_vx_u8m1_tu(vuint8m1_t dest, vuint8m1_t src, size_t offs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_tu(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u8m2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_tu( @@ -391,7 +391,7 @@ vuint8m2_t test_vslideup_vx_u8m2_tu(vuint8m2_t dest, vuint8m2_t src, size_t offs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_tu(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u8m4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_tu( @@ -400,7 +400,7 @@ vuint8m4_t test_vslideup_vx_u8m4_tu(vuint8m4_t dest, vuint8m4_t src, size_t offs 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_tu(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m8_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u8m8_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_tu( @@ -409,7 +409,7 @@ vuint8m8_t test_vslideup_vx_u8m8_tu(vuint8m8_t dest, vuint8m8_t src, size_t offs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_tu(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_u16mf4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u16mf4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_tu( @@ -418,7 +418,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4_tu(vuint16mf4_t dest, vuint16mf4_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_tu(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u16mf2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u16mf2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_tu( @@ -427,7 +427,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2_tu(vuint16mf2_t dest, vuint16mf2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_tu(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m1_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u16m1_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_tu( @@ -436,7 +436,7 @@ vuint16m1_t test_vslideup_vx_u16m1_tu(vuint16m1_t dest, vuint16m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_tu(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u16m2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_tu( @@ -445,7 +445,7 @@ vuint16m2_t 
test_vslideup_vx_u16m2_tu(vuint16m2_t dest, vuint16m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_tu(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u16m4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_tu( @@ -454,7 +454,7 @@ vuint16m4_t test_vslideup_vx_u16m4_tu(vuint16m4_t dest, vuint16m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_tu(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m8_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u16m8_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tu( @@ -463,7 +463,7 @@ vuint16m8_t test_vslideup_vx_u16m8_tu(vuint16m8_t dest, vuint16m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u32mf2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u32mf2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_tu( @@ -472,7 +472,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t dest, vuint32mf2_t src, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_tu(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m1_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u32m1_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_tu( @@ -481,7 +481,7 @@ vuint32m1_t test_vslideup_vx_u32m1_tu(vuint32m1_t dest, vuint32m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_tu(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u32m2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_tu( @@ 
-490,7 +490,7 @@ vuint32m2_t test_vslideup_vx_u32m2_tu(vuint32m2_t dest, vuint32m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_tu(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u32m4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_tu( @@ -499,7 +499,7 @@ vuint32m4_t test_vslideup_vx_u32m4_tu(vuint32m4_t dest, vuint32m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_tu(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m8_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u32m8_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_tu( @@ -508,7 +508,7 @@ vuint32m8_t test_vslideup_vx_u32m8_tu(vuint32m8_t dest, vuint32m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_tu(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m1_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u64m1_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_tu( @@ -517,7 +517,7 @@ vuint64m1_t test_vslideup_vx_u64m1_tu(vuint64m1_t dest, vuint64m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_tu(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m2_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u64m2_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_tu( @@ -526,7 +526,7 @@ vuint64m2_t test_vslideup_vx_u64m2_tu(vuint64m2_t dest, vuint64m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_tu(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m4_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u64m4_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslideup_vx_u64m8_tu( @@ -535,7 +535,7 @@ vuint64m4_t test_vslideup_vx_u64m4_tu(vuint64m4_t dest, vuint64m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8_tu(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m8_tu(dest, src, offset, vl); + return __riscv_vslideup_vx_u64m8_tu(dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_tum( @@ -544,7 +544,7 @@ vuint64m8_t test_vslideup_vx_u64m8_tu(vuint64m8_t dest, vuint64m8_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_f16mf4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16mf4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_tum( @@ -553,7 +553,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t dest, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_f16mf2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16mf2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_tum( @@ -562,7 +562,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t dest, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m1_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m1_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_tum( @@ -571,7 +571,7 @@ vfloat16m1_t test_vslideup_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t dest, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, 
size_t offset, size_t vl) { - return vslideup_vx_f16m2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_tum( @@ -580,7 +580,7 @@ vfloat16m2_t test_vslideup_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t dest, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_tum( @@ -589,7 +589,7 @@ vfloat16m4_t test_vslideup_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t dest, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m8_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m8_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tum( @@ -598,7 +598,7 @@ vfloat16m8_t test_vslideup_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t dest, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_f32mf2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32mf2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_tum( @@ -607,7 +607,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t dest, vf // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m1_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m1_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_tum( @@ -616,7 +616,7 @@ 
vfloat32m1_t test_vslideup_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t dest, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_tum( @@ -625,7 +625,7 @@ vfloat32m2_t test_vslideup_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t dest, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_tum( @@ -634,7 +634,7 @@ vfloat32m4_t test_vslideup_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t dest, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m8_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m8_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_tum( @@ -643,7 +643,7 @@ vfloat32m8_t test_vslideup_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t dest, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m1_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m1_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_tum( @@ -652,7 +652,7 @@ vfloat64m1_t test_vslideup_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t dest, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - 
return vslideup_vx_f64m2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_tum( @@ -661,7 +661,7 @@ vfloat64m2_t test_vslideup_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t dest, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_tum( @@ -670,7 +670,7 @@ vfloat64m4_t test_vslideup_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t dest, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m8_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m8_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_tum( @@ -679,7 +679,7 @@ vfloat64m8_t test_vslideup_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t dest, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf8_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf8_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_tum( @@ -688,7 +688,7 @@ vint8mf8_t test_vslideup_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t dest, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_tum( @@ -697,7 +697,7 @@ vint8mf4_t test_vslideup_vx_i8mf4_tum(vbool32_t 
mask, vint8mf4_t dest, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_tum( @@ -706,7 +706,7 @@ vint8mf2_t test_vslideup_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t dest, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_tum(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m1_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m1_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_tum( @@ -715,7 +715,7 @@ vint8m1_t test_vslideup_vx_i8m1_tum(vbool8_t mask, vint8m1_t dest, vint8m1_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_tum(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_tum( @@ -724,7 +724,7 @@ vint8m2_t test_vslideup_vx_i8m2_tum(vbool4_t mask, vint8m2_t dest, vint8m2_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_tum(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_tum( @@ -733,7 +733,7 @@ vint8m4_t test_vslideup_vx_i8m4_tum(vbool2_t mask, vint8m4_t dest, vint8m4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_tum(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m8_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m8_tum(mask, dest, 
src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_tum( @@ -742,7 +742,7 @@ vint8m8_t test_vslideup_vx_i8m8_tum(vbool1_t mask, vint8m8_t dest, vint8m8_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_i16mf4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16mf4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_tum( @@ -751,7 +751,7 @@ vint16mf4_t test_vslideup_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t dest, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i16mf2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16mf2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_tum( @@ -760,7 +760,7 @@ vint16mf2_t test_vslideup_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t dest, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_tum(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m1_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m1_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_tum( @@ -769,7 +769,7 @@ vint16m1_t test_vslideup_vx_i16m1_tum(vbool16_t mask, vint16m1_t dest, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_tum(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_tum( @@ -778,7 +778,7 @@ vint16m2_t test_vslideup_vx_i16m2_tum(vbool8_t mask, vint16m2_t dest, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vslideup_vx_i16m4_tum(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_tum( @@ -787,7 +787,7 @@ vint16m4_t test_vslideup_vx_i16m4_tum(vbool4_t mask, vint16m4_t dest, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_tum(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m8_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m8_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tum( @@ -796,7 +796,7 @@ vint16m8_t test_vslideup_vx_i16m8_tum(vbool2_t mask, vint16m8_t dest, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i32mf2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32mf2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_tum( @@ -805,7 +805,7 @@ vint32mf2_t test_vslideup_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t dest, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_tum(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m1_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m1_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_tum( @@ -814,7 +814,7 @@ vint32m1_t test_vslideup_vx_i32m1_tum(vbool32_t mask, vint32m1_t dest, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_tum(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslideup_vx_i32m4_tum( @@ -823,7 +823,7 @@ vint32m2_t test_vslideup_vx_i32m2_tum(vbool16_t mask, vint32m2_t dest, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_tum(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_tum( @@ -832,7 +832,7 @@ vint32m4_t test_vslideup_vx_i32m4_tum(vbool8_t mask, vint32m4_t dest, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_tum(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m8_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m8_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_tum( @@ -841,7 +841,7 @@ vint32m8_t test_vslideup_vx_i32m8_tum(vbool4_t mask, vint32m8_t dest, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_tum(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m1_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m1_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_tum( @@ -850,7 +850,7 @@ vint64m1_t test_vslideup_vx_i64m1_tum(vbool64_t mask, vint64m1_t dest, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_tum(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_tum( @@ -859,7 +859,7 @@ vint64m2_t test_vslideup_vx_i64m2_tum(vbool32_t mask, vint64m2_t dest, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_tum(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t 
offset, size_t vl) { - return vslideup_vx_i64m4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_tum( @@ -868,7 +868,7 @@ vint64m4_t test_vslideup_vx_i64m4_tum(vbool16_t mask, vint64m4_t dest, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_tum(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m8_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m8_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_tum( @@ -877,7 +877,7 @@ vint64m8_t test_vslideup_vx_i64m8_tum(vbool8_t mask, vint64m8_t dest, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf8_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf8_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_tum( @@ -886,7 +886,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t dest, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_tum( @@ -895,7 +895,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t dest, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_tum( @@ -904,7 +904,7 @@ vuint8mf2_t 
test_vslideup_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t dest, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_tum(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m1_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m1_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_tum( @@ -913,7 +913,7 @@ vuint8m1_t test_vslideup_vx_u8m1_tum(vbool8_t mask, vuint8m1_t dest, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_tum(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_tum( @@ -922,7 +922,7 @@ vuint8m2_t test_vslideup_vx_u8m2_tum(vbool4_t mask, vuint8m2_t dest, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_tum(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_tum( @@ -931,7 +931,7 @@ vuint8m4_t test_vslideup_vx_u8m4_tum(vbool2_t mask, vuint8m4_t dest, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_tum(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m8_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m8_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_tum( @@ -940,7 +940,7 @@ vuint8m8_t test_vslideup_vx_u8m8_tum(vbool1_t mask, vuint8m8_t dest, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_u16mf4_tum(mask, dest, src, offset, 
vl); + return __riscv_vslideup_vx_u16mf4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_tum( @@ -949,7 +949,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t dest, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u16mf2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16mf2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_tum( @@ -958,7 +958,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t dest, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_tum(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m1_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m1_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_tum( @@ -967,7 +967,7 @@ vuint16m1_t test_vslideup_vx_u16m1_tum(vbool16_t mask, vuint16m1_t dest, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_tum(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_tum( @@ -976,7 +976,7 @@ vuint16m2_t test_vslideup_vx_u16m2_tum(vbool8_t mask, vuint16m2_t dest, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_tum(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_tum( @@ -985,7 +985,7 @@ vuint16m4_t test_vslideup_vx_u16m4_tum(vbool4_t mask, vuint16m4_t dest, vuint16m // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_tum(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m8_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m8_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tum( @@ -994,7 +994,7 @@ vuint16m8_t test_vslideup_vx_u16m8_tum(vbool2_t mask, vuint16m8_t dest, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u32mf2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32mf2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_tum( @@ -1003,7 +1003,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t dest, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_tum(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m1_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m1_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_tum( @@ -1012,7 +1012,7 @@ vuint32m1_t test_vslideup_vx_u32m1_tum(vbool32_t mask, vuint32m1_t dest, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_tum(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_tum( @@ -1021,7 +1021,7 @@ vuint32m2_t test_vslideup_vx_u32m2_tum(vbool16_t mask, vuint32m2_t dest, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_tum(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m4_tum(mask, dest, src, offset, vl); + return 
__riscv_vslideup_vx_u32m4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_tum( @@ -1030,7 +1030,7 @@ vuint32m4_t test_vslideup_vx_u32m4_tum(vbool8_t mask, vuint32m4_t dest, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_tum(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m8_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m8_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_tum( @@ -1039,7 +1039,7 @@ vuint32m8_t test_vslideup_vx_u32m8_tum(vbool4_t mask, vuint32m8_t dest, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_tum(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m1_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m1_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_tum( @@ -1048,7 +1048,7 @@ vuint64m1_t test_vslideup_vx_u64m1_tum(vbool64_t mask, vuint64m1_t dest, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_tum(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m2_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m2_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_tum( @@ -1057,7 +1057,7 @@ vuint64m2_t test_vslideup_vx_u64m2_tum(vbool32_t mask, vuint64m2_t dest, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_tum(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m4_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m4_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_tum( @@ -1066,7 +1066,7 @@ vuint64m4_t test_vslideup_vx_u64m4_tum(vbool16_t mask, vuint64m4_t dest, vuint64 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8_tum(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m8_tum(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m8_tum(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_tumu( @@ -1075,7 +1075,7 @@ vuint64m8_t test_vslideup_vx_u64m8_tum(vbool8_t mask, vuint64m8_t dest, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_f16mf4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16mf4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_tumu( @@ -1084,7 +1084,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t dest, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_f16mf2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16mf2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_tumu( @@ -1093,7 +1093,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t dest, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m1_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m1_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_tumu( @@ -1102,7 +1102,7 @@ vfloat16m1_t test_vslideup_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t dest, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m2_tumu(mask, dest, src, offset, vl); + return 
__riscv_vslideup_vx_f16m2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_tumu( @@ -1111,7 +1111,7 @@ vfloat16m2_t test_vslideup_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t dest, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_tumu( @@ -1120,7 +1120,7 @@ vfloat16m4_t test_vslideup_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t dest, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m8_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m8_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_tumu( @@ -1129,7 +1129,7 @@ vfloat16m8_t test_vslideup_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t dest, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_f32mf2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32mf2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_tumu( @@ -1138,7 +1138,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t dest, v // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m1_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m1_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_tumu( @@ -1147,7 +1147,7 @@ vfloat32m1_t test_vslideup_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t 
dest, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_tumu( @@ -1156,7 +1156,7 @@ vfloat32m2_t test_vslideup_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t dest, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_tumu( @@ -1165,7 +1165,7 @@ vfloat32m4_t test_vslideup_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t dest, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m8_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m8_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_tumu( @@ -1174,7 +1174,7 @@ vfloat32m8_t test_vslideup_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t dest, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m1_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m1_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_tumu( @@ -1183,7 +1183,7 @@ vfloat64m1_t test_vslideup_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t dest, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m2_tumu(mask, dest, src, 
offset, vl); + return __riscv_vslideup_vx_f64m2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_tumu( @@ -1192,7 +1192,7 @@ vfloat64m2_t test_vslideup_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t dest, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_tumu( @@ -1201,7 +1201,7 @@ vfloat64m4_t test_vslideup_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t dest, vflo // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m8_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m8_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_tumu( @@ -1210,7 +1210,7 @@ vfloat64m8_t test_vslideup_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t dest, vfloa // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf8_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf8_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_tumu( @@ -1219,7 +1219,7 @@ vint8mf8_t test_vslideup_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t dest, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_tumu( @@ -1228,7 +1228,7 @@ vint8mf4_t test_vslideup_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t 
dest, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_tumu( @@ -1237,7 +1237,7 @@ vint8mf2_t test_vslideup_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t dest, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_tumu(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m1_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m1_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_tumu( @@ -1246,7 +1246,7 @@ vint8m1_t test_vslideup_vx_i8m1_tumu(vbool8_t mask, vint8m1_t dest, vint8m1_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_tumu(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_tumu( @@ -1255,7 +1255,7 @@ vint8m2_t test_vslideup_vx_i8m2_tumu(vbool4_t mask, vint8m2_t dest, vint8m2_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_tumu(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_tumu( @@ -1264,7 +1264,7 @@ vint8m4_t test_vslideup_vx_i8m4_tumu(vbool2_t mask, vint8m4_t dest, vint8m4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_tumu(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m8_tumu(mask, dest, src, offset, vl); + return 
__riscv_vslideup_vx_i8m8_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_tumu( @@ -1273,7 +1273,7 @@ vint8m8_t test_vslideup_vx_i8m8_tumu(vbool1_t mask, vint8m8_t dest, vint8m8_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_i16mf4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16mf4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_tumu( @@ -1282,7 +1282,7 @@ vint16mf4_t test_vslideup_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t dest, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i16mf2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16mf2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_tumu( @@ -1291,7 +1291,7 @@ vint16mf2_t test_vslideup_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t dest, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_tumu(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m1_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m1_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_tumu( @@ -1300,7 +1300,7 @@ vint16m1_t test_vslideup_vx_i16m1_tumu(vbool16_t mask, vint16m1_t dest, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_tumu(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_tumu( @@ -1309,7 +1309,7 @@ vint16m2_t test_vslideup_vx_i16m2_tumu(vbool8_t mask, vint16m2_t dest, vint16m2_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_tumu(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_tumu( @@ -1318,7 +1318,7 @@ vint16m4_t test_vslideup_vx_i16m4_tumu(vbool4_t mask, vint16m4_t dest, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_tumu(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m8_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m8_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_tumu( @@ -1327,7 +1327,7 @@ vint16m8_t test_vslideup_vx_i16m8_tumu(vbool2_t mask, vint16m8_t dest, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i32mf2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32mf2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_tumu( @@ -1336,7 +1336,7 @@ vint32mf2_t test_vslideup_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t dest, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_tumu(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m1_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m1_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_tumu( @@ -1345,7 +1345,7 @@ vint32m1_t test_vslideup_vx_i32m1_tumu(vbool32_t mask, vint32m1_t dest, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_tumu(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m2_tumu(mask, dest, src, offset, vl); + return 
__riscv_vslideup_vx_i32m2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_tumu( @@ -1354,7 +1354,7 @@ vint32m2_t test_vslideup_vx_i32m2_tumu(vbool16_t mask, vint32m2_t dest, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_tumu(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_tumu( @@ -1363,7 +1363,7 @@ vint32m4_t test_vslideup_vx_i32m4_tumu(vbool8_t mask, vint32m4_t dest, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_tumu(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m8_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m8_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_tumu( @@ -1372,7 +1372,7 @@ vint32m8_t test_vslideup_vx_i32m8_tumu(vbool4_t mask, vint32m8_t dest, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_tumu(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m1_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m1_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_tumu( @@ -1381,7 +1381,7 @@ vint64m1_t test_vslideup_vx_i64m1_tumu(vbool64_t mask, vint64m1_t dest, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_tumu(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_tumu( @@ -1390,7 +1390,7 @@ vint64m2_t test_vslideup_vx_i64m2_tumu(vbool32_t mask, vint64m2_t dest, vint64m2 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_tumu(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_tumu( @@ -1399,7 +1399,7 @@ vint64m4_t test_vslideup_vx_i64m4_tumu(vbool16_t mask, vint64m4_t dest, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_tumu(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m8_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m8_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_tumu( @@ -1408,7 +1408,7 @@ vint64m8_t test_vslideup_vx_i64m8_tumu(vbool8_t mask, vint64m8_t dest, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf8_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf8_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_tumu( @@ -1417,7 +1417,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t dest, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_tumu( @@ -1426,7 +1426,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t dest, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf2_tumu(mask, dest, src, offset, vl); + return 
__riscv_vslideup_vx_u8mf2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_tumu( @@ -1435,7 +1435,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t dest, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m1_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m1_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_tumu( @@ -1444,7 +1444,7 @@ vuint8m1_t test_vslideup_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t dest, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_tumu( @@ -1453,7 +1453,7 @@ vuint8m2_t test_vslideup_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t dest, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_tumu( @@ -1462,7 +1462,7 @@ vuint8m4_t test_vslideup_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t dest, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m8_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m8_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_tumu( @@ -1471,7 +1471,7 @@ vuint8m8_t test_vslideup_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t dest, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf4_t test_vslideup_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_u16mf4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16mf4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_tumu( @@ -1480,7 +1480,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t dest, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u16mf2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16mf2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_tumu( @@ -1489,7 +1489,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t dest, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m1_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m1_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_tumu( @@ -1498,7 +1498,7 @@ vuint16m1_t test_vslideup_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t dest, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_tumu( @@ -1507,7 +1507,7 @@ vuint16m2_t test_vslideup_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t dest, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m4_tumu(mask, dest, src, offset, vl); + return 
__riscv_vslideup_vx_u16m4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_tumu( @@ -1516,7 +1516,7 @@ vuint16m4_t test_vslideup_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t dest, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m8_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m8_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_tumu( @@ -1525,7 +1525,7 @@ vuint16m8_t test_vslideup_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t dest, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u32mf2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32mf2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_tumu( @@ -1534,7 +1534,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t dest, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m1_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m1_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_tumu( @@ -1543,7 +1543,7 @@ vuint32m1_t test_vslideup_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t dest, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_tumu( @@ -1552,7 +1552,7 @@ vuint32m2_t test_vslideup_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t dest, vuint3 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m4_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_tumu( @@ -1561,7 +1561,7 @@ vuint32m4_t test_vslideup_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t dest, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m8_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m8_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_tumu( @@ -1570,7 +1570,7 @@ vuint32m8_t test_vslideup_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t dest, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m1_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m1_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_tumu( @@ -1579,7 +1579,7 @@ vuint64m1_t test_vslideup_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t dest, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m2_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m2_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_tumu( @@ -1588,7 +1588,7 @@ vuint64m2_t test_vslideup_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t dest, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m4_tumu(mask, dest, src, offset, vl); + return 
__riscv_vslideup_vx_u64m4_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_tumu( @@ -1597,7 +1597,7 @@ vuint64m4_t test_vslideup_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t dest, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m8_tumu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m8_tumu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_mu( @@ -1606,7 +1606,7 @@ vuint64m8_t test_vslideup_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t dest, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vslideup_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_f16mf4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16mf4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_mu( @@ -1615,7 +1615,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t dest, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vslideup_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_f16mf2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16mf2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_mu( @@ -1624,7 +1624,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t dest, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vslideup_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m1_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m1_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_mu( @@ -1633,7 +1633,7 @@ vfloat16m1_t test_vslideup_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t dest, vfloat // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vslideup_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_mu( @@ -1642,7 +1642,7 @@ vfloat16m2_t test_vslideup_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t dest, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vslideup_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_mu( @@ -1651,7 +1651,7 @@ vfloat16m4_t test_vslideup_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t dest, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vslideup_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f16m8_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f16m8_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_mu( @@ -1660,7 +1660,7 @@ vfloat16m8_t test_vslideup_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t dest, vfloat1 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_f32mf2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32mf2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_mu( @@ -1669,7 +1669,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t dest, vfl // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m1_mu(mask, dest, src, offset, vl); + return 
__riscv_vslideup_vx_f32m1_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_mu( @@ -1678,7 +1678,7 @@ vfloat32m1_t test_vslideup_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t dest, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_mu( @@ -1687,7 +1687,7 @@ vfloat32m2_t test_vslideup_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t dest, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_mu( @@ -1696,7 +1696,7 @@ vfloat32m4_t test_vslideup_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t dest, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f32m8_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f32m8_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_mu( @@ -1705,7 +1705,7 @@ vfloat32m8_t test_vslideup_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t dest, vfloat3 // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m1_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m1_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_mu( @@ -1714,7 +1714,7 @@ vfloat64m1_t test_vslideup_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t dest, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m2_t test_vslideup_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_mu( @@ -1723,7 +1723,7 @@ vfloat64m2_t test_vslideup_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t dest, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_mu( @@ -1732,7 +1732,7 @@ vfloat64m4_t test_vslideup_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t dest, vfloat // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_f64m8_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_f64m8_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_mu( @@ -1741,7 +1741,7 @@ vfloat64m8_t test_vslideup_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t dest, vfloat6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf8_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf8_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_mu( @@ -1750,7 +1750,7 @@ vint8mf8_t test_vslideup_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t dest, vint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf4_mu(mask, dest, src, offset, vl); } // 
CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_mu( @@ -1759,7 +1759,7 @@ vint8mf4_t test_vslideup_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t dest, vint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i8mf2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8mf2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_mu( @@ -1768,7 +1768,7 @@ vint8mf2_t test_vslideup_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t dest, vint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_mu(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m1_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m1_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_mu( @@ -1777,7 +1777,7 @@ vint8m1_t test_vslideup_vx_i8m1_mu(vbool8_t mask, vint8m1_t dest, vint8m1_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_mu(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_mu( @@ -1786,7 +1786,7 @@ vint8m2_t test_vslideup_vx_i8m2_mu(vbool4_t mask, vint8m2_t dest, vint8m2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_mu(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i8m4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_mu( @@ -1795,7 +1795,7 @@ vint8m4_t test_vslideup_vx_i8m4_mu(vbool2_t mask, vint8m4_t dest, vint8m4_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_mu(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - 
return vslideup_vx_i8m8_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i8m8_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_mu( @@ -1804,7 +1804,7 @@ vint8m8_t test_vslideup_vx_i8m8_mu(vbool1_t mask, vint8m8_t dest, vint8m8_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_i16mf4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16mf4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_mu( @@ -1813,7 +1813,7 @@ vint16mf4_t test_vslideup_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t dest, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i16mf2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16mf2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_mu( @@ -1822,7 +1822,7 @@ vint16mf2_t test_vslideup_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t dest, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_mu(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m1_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m1_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_mu( @@ -1831,7 +1831,7 @@ vint16m1_t test_vslideup_vx_i16m1_mu(vbool16_t mask, vint16m1_t dest, vint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_mu(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_mu( @@ -1840,7 +1840,7 @@ vint16m2_t test_vslideup_vx_i16m2_mu(vbool8_t mask, 
vint16m2_t dest, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_mu(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_mu( @@ -1849,7 +1849,7 @@ vint16m4_t test_vslideup_vx_i16m4_mu(vbool4_t mask, vint16m4_t dest, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_mu(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i16m8_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i16m8_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_mu( @@ -1858,7 +1858,7 @@ vint16m8_t test_vslideup_vx_i16m8_mu(vbool2_t mask, vint16m8_t dest, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_i32mf2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32mf2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_mu( @@ -1867,7 +1867,7 @@ vint32mf2_t test_vslideup_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t dest, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_mu(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m1_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m1_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_mu( @@ -1876,7 +1876,7 @@ vint32m1_t test_vslideup_vx_i32m1_mu(vbool32_t mask, vint32m1_t dest, vint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_mu(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m2_mu(mask, dest, src, offset, vl); + return 
__riscv_vslideup_vx_i32m2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_mu( @@ -1885,7 +1885,7 @@ vint32m2_t test_vslideup_vx_i32m2_mu(vbool16_t mask, vint32m2_t dest, vint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_mu(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_mu( @@ -1894,7 +1894,7 @@ vint32m4_t test_vslideup_vx_i32m4_mu(vbool8_t mask, vint32m4_t dest, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_mu(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i32m8_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i32m8_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_mu( @@ -1903,7 +1903,7 @@ vint32m8_t test_vslideup_vx_i32m8_mu(vbool4_t mask, vint32m8_t dest, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_mu(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m1_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m1_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_mu( @@ -1912,7 +1912,7 @@ vint64m1_t test_vslideup_vx_i64m1_mu(vbool64_t mask, vint64m1_t dest, vint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_mu(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_mu( @@ -1921,7 +1921,7 @@ vint64m2_t test_vslideup_vx_i64m2_mu(vbool32_t mask, vint64m2_t dest, vint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vslideup_vx_i64m4_mu(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_mu( @@ -1930,7 +1930,7 @@ vint64m4_t test_vslideup_vx_i64m4_mu(vbool16_t mask, vint64m4_t dest, vint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_mu(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_i64m8_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_i64m8_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_mu( @@ -1939,7 +1939,7 @@ vint64m8_t test_vslideup_vx_i64m8_mu(vbool8_t mask, vint64m8_t dest, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf8_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf8_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_mu( @@ -1948,7 +1948,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t dest, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_mu( @@ -1957,7 +1957,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t dest, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u8mf2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8mf2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslideup_vx_u8m1_mu( @@ -1966,7 +1966,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t dest, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_mu(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m1_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m1_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_mu( @@ -1975,7 +1975,7 @@ vuint8m1_t test_vslideup_vx_u8m1_mu(vbool8_t mask, vuint8m1_t dest, vuint8m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_mu(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_mu( @@ -1984,7 +1984,7 @@ vuint8m2_t test_vslideup_vx_u8m2_mu(vbool4_t mask, vuint8m2_t dest, vuint8m2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_mu(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_mu( @@ -1993,7 +1993,7 @@ vuint8m4_t test_vslideup_vx_u8m4_mu(vbool2_t mask, vuint8m4_t dest, vuint8m4_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_mu(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u8m8_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u8m8_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_mu( @@ -2002,7 +2002,7 @@ vuint8m8_t test_vslideup_vx_u8m8_mu(vbool1_t mask, vuint8m8_t dest, vuint8m8_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - 
return vslideup_vx_u16mf4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16mf4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_mu( @@ -2011,7 +2011,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t dest, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u16mf2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16mf2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_mu( @@ -2020,7 +2020,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t dest, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_mu(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m1_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m1_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_mu( @@ -2029,7 +2029,7 @@ vuint16m1_t test_vslideup_vx_u16m1_mu(vbool16_t mask, vuint16m1_t dest, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_mu(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_mu( @@ -2038,7 +2038,7 @@ vuint16m2_t test_vslideup_vx_u16m2_mu(vbool8_t mask, vuint16m2_t dest, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_mu(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_mu( @@ -2047,7 +2047,7 @@ vuint16m4_t test_vslideup_vx_u16m4_mu(vbool4_t mask, 
vuint16m4_t dest, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_mu(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u16m8_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u16m8_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_mu( @@ -2056,7 +2056,7 @@ vuint16m8_t test_vslideup_vx_u16m8_mu(vbool2_t mask, vuint16m8_t dest, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return vslideup_vx_u32mf2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32mf2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_mu( @@ -2065,7 +2065,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t dest, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_mu(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m1_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m1_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_mu( @@ -2074,7 +2074,7 @@ vuint32m1_t test_vslideup_vx_u32m1_mu(vbool32_t mask, vuint32m1_t dest, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_mu(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_mu( @@ -2083,7 +2083,7 @@ vuint32m2_t test_vslideup_vx_u32m2_mu(vbool16_t mask, vuint32m2_t dest, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_mu(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m4_mu(mask, dest, src, offset, vl); + return 
__riscv_vslideup_vx_u32m4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_mu( @@ -2092,7 +2092,7 @@ vuint32m4_t test_vslideup_vx_u32m4_mu(vbool8_t mask, vuint32m4_t dest, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_mu(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u32m8_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u32m8_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_mu( @@ -2101,7 +2101,7 @@ vuint32m8_t test_vslideup_vx_u32m8_mu(vbool4_t mask, vuint32m8_t dest, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_mu(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m1_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m1_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_mu( @@ -2110,7 +2110,7 @@ vuint64m1_t test_vslideup_vx_u64m1_mu(vbool64_t mask, vuint64m1_t dest, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_mu(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m2_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m2_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_mu( @@ -2119,7 +2119,7 @@ vuint64m2_t test_vslideup_vx_u64m2_mu(vbool32_t mask, vuint64m2_t dest, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_mu(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m4_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m4_mu(mask, dest, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_mu( @@ -2128,6 +2128,6 @@ vuint64m4_t test_vslideup_vx_u64m4_mu(vbool16_t mask, vuint64m4_t dest, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vslideup_vx_u64m8_mu(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { - return vslideup_vx_u64m8_mu(mask, dest, src, offset, vl); + return __riscv_vslideup_vx_u64m8_mu(mask, dest, src, offset, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsll.c index 2d57a3b6dde6..d78721b3b267 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsll.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsll.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll_vv_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vsll_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vsll_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll_vv_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vsll_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return 
vsll_vx_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vsll_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll_vv_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vsll_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vsll_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll_vv_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vsll_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vsll_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return 
vsll_vv_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vsll_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vsll_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll_vv_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vsll_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vsll_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_i8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vsll_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m8_tu(maskedoff, op1, shift, 
vl); + return __riscv_vsll_vx_i8m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vsll_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vsll_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vsll_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vsll_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vsll_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return 
vsll_vv_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vsll_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vsll_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vsll_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vsll_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vsll_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return 
vsll_vx_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vsll_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_i16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vsll_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vsll_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vsll_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vsll_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, 
size_t vl) { - return vsll_vv_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vsll_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vsll_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vsll_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vsll_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vsll_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, 
size_t vl) { - return vsll_vx_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vsll_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_i32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vsll_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vsll_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_i64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vsll_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vsll_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t 
shift, size_t vl) { - return vsll_vv_i64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vsll_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vsll_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll_vv_i64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vsll_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vsll_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_i64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vsll_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t 
shift, size_t vl) { - return vsll_vx_i64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_tu( @@ -408,7 +408,7 @@ vint64m8_t test_vsll_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll_vv_u8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_tu( @@ -417,7 +417,7 @@ vuint8mf8_t test_vsll_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_tu( @@ -426,7 +426,7 @@ vuint8mf8_t test_vsll_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll_vv_u8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_tu( @@ -435,7 +435,7 @@ vuint8mf4_t test_vsll_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_tu( @@ -444,7 +444,7 @@ vuint8mf4_t test_vsll_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vv_u8mf2_tu(vuint8mf2_t maskedoff, 
vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll_vv_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_tu( @@ -453,7 +453,7 @@ vuint8mf2_t test_vsll_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m1_tu( @@ -462,7 +462,7 @@ vuint8mf2_t test_vsll_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll_vv_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m1_tu( @@ -471,7 +471,7 @@ vuint8m1_t test_vsll_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m2_tu( @@ -480,7 +480,7 @@ vuint8m1_t test_vsll_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll_vv_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m2_tu( @@ -489,7 +489,7 @@ vuint8m2_t test_vsll_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vx_u8m2_tu(vuint8m2_t maskedoff, 
vuint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m4_tu( @@ -498,7 +498,7 @@ vuint8m2_t test_vsll_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll_vv_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m4_tu( @@ -507,7 +507,7 @@ vuint8m4_t test_vsll_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m8_tu( @@ -516,7 +516,7 @@ vuint8m4_t test_vsll_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_u8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m8_tu( @@ -525,7 +525,7 @@ vuint8m8_t test_vsll_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_tu( @@ -534,7 +534,7 @@ vuint8m8_t test_vsll_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t 
op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_u16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_tu( @@ -543,7 +543,7 @@ vuint16mf4_t test_vsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_tu( @@ -552,7 +552,7 @@ vuint16mf4_t test_vsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_u16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_tu( @@ -561,7 +561,7 @@ vuint16mf2_t test_vsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m1_tu( @@ -570,7 +570,7 @@ vuint16mf2_t test_vsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll_vv_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m1_tu( @@ -579,7 +579,7 @@ vuint16m1_t test_vsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m2_tu( @@ -588,7 +588,7 @@ vuint16m1_t test_vsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_u16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m2_tu( @@ -597,7 +597,7 @@ vuint16m2_t test_vsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m4_tu( @@ -606,7 +606,7 @@ vuint16m2_t test_vsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m4_tu( @@ -615,7 +615,7 @@ vuint16m4_t test_vsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m8_tu( @@ -624,7 +624,7 @@ vuint16m4_t test_vsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint16m8_t test_vsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_u16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m8_tu( @@ -633,7 +633,7 @@ vuint16m8_t test_vsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tu( @@ -642,7 +642,7 @@ vuint16m8_t test_vsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tu( @@ -651,7 +651,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m1_tu( @@ -660,7 +660,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll_vv_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m1_tu( @@ -669,7 +669,7 @@ vuint32m1_t test_vsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, 
vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m2_tu( @@ -678,7 +678,7 @@ vuint32m1_t test_vsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_u32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m2_tu( @@ -687,7 +687,7 @@ vuint32m2_t test_vsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m4_tu( @@ -696,7 +696,7 @@ vuint32m2_t test_vsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m4_tu( @@ -705,7 +705,7 @@ vuint32m4_t test_vsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m8_tu( @@ -714,7 +714,7 @@ vuint32m4_t test_vsll_vx_u32m4_tu(vuint32m4_t maskedoff, 
vuint32m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_u32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m8_tu( @@ -723,7 +723,7 @@ vuint32m8_t test_vsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m1_tu( @@ -732,7 +732,7 @@ vuint32m8_t test_vsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_u64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m1_tu( @@ -741,7 +741,7 @@ vuint64m1_t test_vsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m2_tu( @@ -750,7 +750,7 @@ vuint64m1_t test_vsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll_vv_u64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m2_tu( @@ -759,7 +759,7 @@ vuint64m2_t 
test_vsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m4_tu( @@ -768,7 +768,7 @@ vuint64m2_t test_vsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll_vv_u64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m4_tu( @@ -777,7 +777,7 @@ vuint64m4_t test_vsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m8_tu( @@ -786,7 +786,7 @@ vuint64m4_t test_vsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_u64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_tu( @@ -795,7 +795,7 @@ vuint64m8_t test_vsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_tum( @@ -804,7 
+804,7 @@ vuint64m8_t test_vsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll_vv_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_tum( @@ -813,7 +813,7 @@ vint8mf8_t test_vsll_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_tum( @@ -822,7 +822,7 @@ vint8mf8_t test_vsll_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll_vv_i8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_tum( @@ -831,7 +831,7 @@ vint8mf4_t test_vsll_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_tum( @@ -840,7 +840,7 @@ vint8mf4_t test_vsll_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return 
vsll_vv_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_tum( @@ -849,7 +849,7 @@ vint8mf2_t test_vsll_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m1_tum( @@ -858,7 +858,7 @@ vint8mf2_t test_vsll_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll_vv_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m1_tum( @@ -867,7 +867,7 @@ vint8m1_t test_vsll_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m2_tum( @@ -876,7 +876,7 @@ vint8m1_t test_vsll_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll_vv_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m2_tum( @@ -885,7 +885,7 @@ vint8m2_t test_vsll_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m2_t test_vsll_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m4_tum( @@ -894,7 +894,7 @@ vint8m2_t test_vsll_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll_vv_i8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m4_tum( @@ -903,7 +903,7 @@ vint8m4_t test_vsll_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m8_tum( @@ -912,7 +912,7 @@ vint8m4_t test_vsll_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_i8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m8_tum( @@ -921,7 +921,7 @@ vint8m8_t test_vsll_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_tum( @@ 
-930,7 +930,7 @@ vint8m8_t test_vsll_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_tum( @@ -939,7 +939,7 @@ vint16mf4_t test_vsll_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_tum( @@ -948,7 +948,7 @@ vint16mf4_t test_vsll_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_tum( @@ -957,7 +957,7 @@ vint16mf2_t test_vsll_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m1_tum( @@ -966,7 +966,7 @@ vint16mf2_t test_vsll_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, 
size_t vl) { - return vsll_vv_i16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m1_tum( @@ -975,7 +975,7 @@ vint16m1_t test_vsll_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m2_tum( @@ -984,7 +984,7 @@ vint16m1_t test_vsll_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m2_tum( @@ -993,7 +993,7 @@ vint16m2_t test_vsll_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m4_tum( @@ -1002,7 +1002,7 @@ vint16m2_t test_vsll_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_i16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m4_tum( @@ -1011,7 +1011,7 @@ vint16m4_t test_vsll_vv_i16m4_tum(vbool4_t mask, 
vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m8_tum( @@ -1020,7 +1020,7 @@ vint16m4_t test_vsll_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_i16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m8_tum( @@ -1029,7 +1029,7 @@ vint16m8_t test_vsll_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tum( @@ -1038,7 +1038,7 @@ vint16m8_t test_vsll_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_i32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tum( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vsll_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32mf2_tum(mask, maskedoff, op1, shift, vl); + 
return __riscv_vsll_vx_i32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m1_tum( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vsll_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll_vv_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m1_tum( @@ -1065,7 +1065,7 @@ vint32m1_t test_vsll_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m2_tum( @@ -1074,7 +1074,7 @@ vint32m1_t test_vsll_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m2_tum( @@ -1083,7 +1083,7 @@ vint32m2_t test_vsll_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m4_tum( @@ -1092,7 +1092,7 @@ vint32m2_t test_vsll_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m4_t test_vsll_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_i32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m4_tum( @@ -1101,7 +1101,7 @@ vint32m4_t test_vsll_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m8_tum( @@ -1110,7 +1110,7 @@ vint32m4_t test_vsll_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_i32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m8_tum( @@ -1119,7 +1119,7 @@ vint32m8_t test_vsll_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m1_tum( @@ -1128,7 +1128,7 @@ vint32m8_t test_vsll_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_i64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m1_tum(mask, maskedoff, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vsll_vx_i64m1_tum( @@ -1137,7 +1137,7 @@ vint64m1_t test_vsll_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m2_tum( @@ -1146,7 +1146,7 @@ vint64m1_t test_vsll_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll_vv_i64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m2_tum( @@ -1155,7 +1155,7 @@ vint64m2_t test_vsll_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m4_tum( @@ -1164,7 +1164,7 @@ vint64m2_t test_vsll_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll_vv_i64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m4_tum( @@ -1173,7 +1173,7 @@ vint64m4_t test_vsll_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, 
vint64m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m8_tum( @@ -1182,7 +1182,7 @@ vint64m4_t test_vsll_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_i64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m8_tum( @@ -1191,7 +1191,7 @@ vint64m8_t test_vsll_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_tum( @@ -1200,7 +1200,7 @@ vint64m8_t test_vsll_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll_vv_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_tum( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vsll_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_tum( @@ -1218,7 +1218,7 @@ vuint8mf8_t 
test_vsll_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll_vv_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_tum( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vsll_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_tum( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vsll_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll_vv_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_tum( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vsll_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m1_tum( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vsll_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return 
vsll_vv_u8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m1_tum( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vsll_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m2_tum( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vsll_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll_vv_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m2_tum( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vsll_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m4_tum( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vsll_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll_vv_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m4_tum( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vsll_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m8_tum( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vsll_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_u8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m8_tum( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vsll_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_tum( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vsll_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_tum( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16mf4_tum(mask, 
maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_tum( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_u16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_tum( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m1_tum( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll_vv_u16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m1_tum( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m2_tum( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m2_tum( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m4_tum( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m4_tum( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m8_tum( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_u16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m8_tum(mask, maskedoff, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vsll_vx_u16m8_tum( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tum( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tum( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m1_tum( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll_vv_u32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m1_tum( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vx_u32m1_tum(vbool32_t mask, 
vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m2_tum( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m2_tum( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m4_tum( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m4_tum( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m8_tum( @@ 
-1506,7 +1506,7 @@ vuint32m4_t test_vsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_u32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m8_tum( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m1_tum( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_u64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m1_tum( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m2_tum( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, 
size_t vl) { - return vsll_vv_u64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m2_tum( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m4_tum( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll_vv_u64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m4_tum( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m8_tum( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_u64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_tum( @@ -1587,7 +1587,7 @@ vuint64m8_t 
test_vsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_tumu( @@ -1596,7 +1596,7 @@ vuint64m8_t test_vsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll_vv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_tumu( @@ -1605,7 +1605,7 @@ vint8mf8_t test_vsll_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_tumu( @@ -1614,7 +1614,7 @@ vint8mf8_t test_vsll_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll_vv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_tumu( @@ -1623,7 +1623,7 @@ vint8mf4_t test_vsll_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return 
vsll_vx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_tumu( @@ -1632,7 +1632,7 @@ vint8mf4_t test_vsll_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll_vv_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_tumu( @@ -1641,7 +1641,7 @@ vint8mf2_t test_vsll_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m1_tumu( @@ -1650,7 +1650,7 @@ vint8mf2_t test_vsll_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll_vv_i8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m1_tumu( @@ -1659,7 +1659,7 @@ vint8m1_t test_vsll_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m2_tumu( @@ -1668,7 +1668,7 @@ vint8m1_t test_vsll_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, 
vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll_vv_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m2_tumu( @@ -1677,7 +1677,7 @@ vint8m2_t test_vsll_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m4_tumu( @@ -1686,7 +1686,7 @@ vint8m2_t test_vsll_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll_vv_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m4_tumu( @@ -1695,7 +1695,7 @@ vint8m4_t test_vsll_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m8_tumu( @@ -1704,7 +1704,7 @@ vint8m4_t test_vsll_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_i8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m8_tumu(mask, maskedoff, 
op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m8_tumu( @@ -1713,7 +1713,7 @@ vint8m8_t test_vsll_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_tumu( @@ -1722,7 +1722,7 @@ vint8m8_t test_vsll_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_tumu( @@ -1731,7 +1731,7 @@ vint16mf4_t test_vsll_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_tumu( @@ -1740,7 +1740,7 @@ vint16mf4_t test_vsll_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_tumu( @@ -1749,7 +1749,7 @@ vint16mf2_t test_vsll_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vsll_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m1_tumu( @@ -1758,7 +1758,7 @@ vint16mf2_t test_vsll_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll_vv_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m1_tumu( @@ -1767,7 +1767,7 @@ vint16m1_t test_vsll_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m2_tumu( @@ -1776,7 +1776,7 @@ vint16m1_t test_vsll_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_i16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m2_tumu( @@ -1785,7 +1785,7 @@ vint16m2_t test_vsll_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m2_tumu(mask, maskedoff, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vsll_vv_i16m4_tumu( @@ -1794,7 +1794,7 @@ vint16m2_t test_vsll_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m4_tumu( @@ -1803,7 +1803,7 @@ vint16m4_t test_vsll_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m8_tumu( @@ -1812,7 +1812,7 @@ vint16m4_t test_vsll_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_i16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m8_tumu( @@ -1821,7 +1821,7 @@ vint16m8_t test_vsll_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_tumu( @@ -1830,7 +1830,7 @@ vint16m8_t test_vsll_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t 
maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_tumu( @@ -1839,7 +1839,7 @@ vint32mf2_t test_vsll_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m1_tumu( @@ -1848,7 +1848,7 @@ vint32mf2_t test_vsll_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll_vv_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m1_tumu( @@ -1857,7 +1857,7 @@ vint32m1_t test_vsll_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m2_tumu( @@ -1866,7 +1866,7 @@ vint32m1_t test_vsll_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_i32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsll_vx_i32m2_tumu( @@ -1875,7 +1875,7 @@ vint32m2_t test_vsll_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m4_tumu( @@ -1884,7 +1884,7 @@ vint32m2_t test_vsll_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m4_tumu( @@ -1893,7 +1893,7 @@ vint32m4_t test_vsll_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m8_tumu( @@ -1902,7 +1902,7 @@ vint32m4_t test_vsll_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_i32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m8_tumu( @@ -1911,7 +1911,7 @@ vint32m8_t test_vsll_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t 
op1, size_t shift, size_t vl) { - return vsll_vx_i32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m1_tumu( @@ -1920,7 +1920,7 @@ vint32m8_t test_vsll_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_i64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m1_tumu( @@ -1929,7 +1929,7 @@ vint64m1_t test_vsll_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m2_tumu( @@ -1938,7 +1938,7 @@ vint64m1_t test_vsll_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll_vv_i64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m2_tumu( @@ -1947,7 +1947,7 @@ vint64m2_t test_vsll_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m4_tumu( @@ -1956,7 +1956,7 @@ vint64m2_t 
test_vsll_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll_vv_i64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m4_tumu( @@ -1965,7 +1965,7 @@ vint64m4_t test_vsll_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m8_tumu( @@ -1974,7 +1974,7 @@ vint64m4_t test_vsll_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_i64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m8_tumu( @@ -1983,7 +1983,7 @@ vint64m8_t test_vsll_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_tumu( @@ -1992,7 +1992,7 @@ vint64m8_t test_vsll_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return 
vsll_vv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_tumu( @@ -2001,7 +2001,7 @@ vuint8mf8_t test_vsll_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_tumu( @@ -2010,7 +2010,7 @@ vuint8mf8_t test_vsll_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll_vv_u8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_tumu( @@ -2019,7 +2019,7 @@ vuint8mf4_t test_vsll_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_tumu( @@ -2028,7 +2028,7 @@ vuint8mf4_t test_vsll_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll_vv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_tumu( @@ -2037,7 +2037,7 @@ vuint8mf2_t 
test_vsll_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m1_tumu( @@ -2046,7 +2046,7 @@ vuint8mf2_t test_vsll_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll_vv_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m1_tumu( @@ -2055,7 +2055,7 @@ vuint8m1_t test_vsll_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m2_tumu( @@ -2064,7 +2064,7 @@ vuint8m1_t test_vsll_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll_vv_u8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m2_tumu( @@ -2073,7 +2073,7 @@ vuint8m2_t test_vsll_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m2_tumu(mask, 
maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m4_tumu( @@ -2082,7 +2082,7 @@ vuint8m2_t test_vsll_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll_vv_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m4_tumu( @@ -2091,7 +2091,7 @@ vuint8m4_t test_vsll_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m8_tumu( @@ -2100,7 +2100,7 @@ vuint8m4_t test_vsll_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_u8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m8_tumu( @@ -2109,7 +2109,7 @@ vuint8m8_t test_vsll_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_tumu( @@ -2118,7 +2118,7 @@ vuint8m8_t test_vsll_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_tumu( @@ -2127,7 +2127,7 @@ vuint16mf4_t test_vsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_tumu( @@ -2136,7 +2136,7 @@ vuint16mf4_t test_vsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_tumu( @@ -2145,7 +2145,7 @@ vuint16mf2_t test_vsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m1_tumu( @@ -2154,7 +2154,7 @@ vuint16mf2_t test_vsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll_vv_u16m1_tumu(mask, maskedoff, 
op1, shift, vl); + return __riscv_vsll_vv_u16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m1_tumu( @@ -2163,7 +2163,7 @@ vuint16m1_t test_vsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m2_tumu( @@ -2172,7 +2172,7 @@ vuint16m1_t test_vsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_u16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m2_tumu( @@ -2181,7 +2181,7 @@ vuint16m2_t test_vsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m4_tumu( @@ -2190,7 +2190,7 @@ vuint16m2_t test_vsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m4_tumu( @@ -2199,7 +2199,7 @@ vuint16m4_t test_vsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, 
vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m8_tumu( @@ -2208,7 +2208,7 @@ vuint16m4_t test_vsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_u16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m8_tumu( @@ -2217,7 +2217,7 @@ vuint16m8_t test_vsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_tumu( @@ -2226,7 +2226,7 @@ vuint16m8_t test_vsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_tumu( @@ -2235,7 +2235,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32mf2_tumu(mask, maskedoff, op1, shift, 
vl); + return __riscv_vsll_vx_u32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m1_tumu( @@ -2244,7 +2244,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll_vv_u32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m1_tumu( @@ -2253,7 +2253,7 @@ vuint32m1_t test_vsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m2_tumu( @@ -2262,7 +2262,7 @@ vuint32m1_t test_vsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m2_tumu( @@ -2271,7 +2271,7 @@ vuint32m2_t test_vsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m4_tumu( @@ -2280,7 +2280,7 @@ vuint32m2_t test_vsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m4_tumu( @@ -2289,7 +2289,7 @@ vuint32m4_t test_vsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m8_tumu( @@ -2298,7 +2298,7 @@ vuint32m4_t test_vsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_u32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m8_tumu( @@ -2307,7 +2307,7 @@ vuint32m8_t test_vsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m1_tumu( @@ -2316,7 +2316,7 @@ vuint32m8_t test_vsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_u64m1_tumu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vsll_vv_u64m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m1_tumu( @@ -2325,7 +2325,7 @@ vuint64m1_t test_vsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m2_tumu( @@ -2334,7 +2334,7 @@ vuint64m1_t test_vsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll_vv_u64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m2_tumu( @@ -2343,7 +2343,7 @@ vuint64m2_t test_vsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m4_tumu( @@ -2352,7 +2352,7 @@ vuint64m2_t test_vsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll_vv_u64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m4_tumu( @@ -2361,7 +2361,7 @@ vuint64m4_t test_vsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint64m4_t test_vsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m8_tumu( @@ -2370,7 +2370,7 @@ vuint64m4_t test_vsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_u64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_tumu( @@ -2379,7 +2379,7 @@ vuint64m8_t test_vsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_mu( @@ -2388,7 +2388,7 @@ vuint64m8_t test_vsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll_vv_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_mu( @@ -2397,7 +2397,7 @@ vint8mf8_t test_vsll_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8mf8_mu(mask, maskedoff, op1, 
shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_mu( @@ -2406,7 +2406,7 @@ vint8mf8_t test_vsll_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll_vv_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_mu( @@ -2415,7 +2415,7 @@ vint8mf4_t test_vsll_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_mu( @@ -2424,7 +2424,7 @@ vint8mf4_t test_vsll_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll_vv_i8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_mu( @@ -2433,7 +2433,7 @@ vint8mf2_t test_vsll_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m1_mu( @@ -2442,7 +2442,7 @@ vint8mf2_t test_vsll_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t 
op1, vuint8m1_t shift, size_t vl) { - return vsll_vv_i8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m1_mu( @@ -2451,7 +2451,7 @@ vint8m1_t test_vsll_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m2_mu( @@ -2460,7 +2460,7 @@ vint8m1_t test_vsll_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll_vv_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m2_mu( @@ -2469,7 +2469,7 @@ vint8m2_t test_vsll_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m4_mu( @@ -2478,7 +2478,7 @@ vint8m2_t test_vsll_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll_vv_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m4_mu( @@ -2487,7 +2487,7 @@ vint8m4_t test_vsll_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m8_mu( @@ -2496,7 +2496,7 @@ vint8m4_t test_vsll_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_i8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i8m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m8_mu( @@ -2505,7 +2505,7 @@ vint8m8_t test_vsll_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i8m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_mu( @@ -2514,7 +2514,7 @@ vint8m8_t test_vsll_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_mu( @@ -2523,7 +2523,7 @@ vint16mf4_t test_vsll_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16mf4_mu(mask, maskedoff, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_mu( @@ -2532,7 +2532,7 @@ vint16mf4_t test_vsll_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_i16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_mu( @@ -2541,7 +2541,7 @@ vint16mf2_t test_vsll_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m1_mu( @@ -2550,7 +2550,7 @@ vint16mf2_t test_vsll_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll_vv_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m1_mu( @@ -2559,7 +2559,7 @@ vint16m1_t test_vsll_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m2_mu( @@ -2568,7 +2568,7 @@ vint16m1_t test_vsll_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t 
op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m2_mu( @@ -2577,7 +2577,7 @@ vint16m2_t test_vsll_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m4_mu( @@ -2586,7 +2586,7 @@ vint16m2_t test_vsll_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m4_mu( @@ -2595,7 +2595,7 @@ vint16m4_t test_vsll_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m8_mu( @@ -2604,7 +2604,7 @@ vint16m4_t test_vsll_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_i16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i16m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m8_mu( @@ -2613,7 +2613,7 @@ vint16m8_t test_vsll_vv_i16m8_mu(vbool2_t mask, 
vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i16m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_mu( @@ -2622,7 +2622,7 @@ vint16m8_t test_vsll_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_mu( @@ -2631,7 +2631,7 @@ vint32mf2_t test_vsll_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m1_mu( @@ -2640,7 +2640,7 @@ vint32mf2_t test_vsll_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll_vv_i32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m1_mu( @@ -2649,7 +2649,7 @@ vint32m1_t test_vsll_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m1_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vsll_vx_i32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m2_mu( @@ -2658,7 +2658,7 @@ vint32m1_t test_vsll_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m2_mu( @@ -2667,7 +2667,7 @@ vint32m2_t test_vsll_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m4_mu( @@ -2676,7 +2676,7 @@ vint32m2_t test_vsll_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_i32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m4_mu( @@ -2685,7 +2685,7 @@ vint32m4_t test_vsll_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m8_mu( @@ -2694,7 +2694,7 @@ vint32m4_t test_vsll_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vsll_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_i32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i32m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m8_mu( @@ -2703,7 +2703,7 @@ vint32m8_t test_vsll_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vsll_vx_i32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i32m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m1_mu( @@ -2712,7 +2712,7 @@ vint32m8_t test_vsll_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_i64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m1_mu( @@ -2721,7 +2721,7 @@ vint64m1_t test_vsll_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m2_mu( @@ -2730,7 +2730,7 @@ vint64m1_t test_vsll_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll_vv_i64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsll_vx_i64m2_mu( @@ -2739,7 +2739,7 @@ vint64m2_t test_vsll_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m4_mu( @@ -2748,7 +2748,7 @@ vint64m2_t test_vsll_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll_vv_i64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m4_mu( @@ -2757,7 +2757,7 @@ vint64m4_t test_vsll_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vsll_vx_i64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m8_mu( @@ -2766,7 +2766,7 @@ vint64m4_t test_vsll_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_i64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_i64m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m8_mu( @@ -2775,7 +2775,7 @@ vint64m8_t test_vsll_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - 
return vsll_vx_i64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_i64m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_mu( @@ -2784,7 +2784,7 @@ vint64m8_t test_vsll_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsll_vv_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_mu( @@ -2793,7 +2793,7 @@ vuint8mf8_t test_vsll_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_mu( @@ -2802,7 +2802,7 @@ vuint8mf8_t test_vsll_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsll_vv_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_mu( @@ -2811,7 +2811,7 @@ vuint8mf4_t test_vsll_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_mu( @@ -2820,7 +2820,7 @@ vuint8mf4_t test_vsll_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t 
maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsll_vv_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_mu( @@ -2829,7 +2829,7 @@ vuint8mf2_t test_vsll_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m1_mu( @@ -2838,7 +2838,7 @@ vuint8mf2_t test_vsll_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsll_vv_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m1_mu( @@ -2847,7 +2847,7 @@ vuint8m1_t test_vsll_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m2_mu( @@ -2856,7 +2856,7 @@ vuint8m1_t test_vsll_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsll_vv_u8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m2_mu(mask, maskedoff, op1, 
shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m2_mu( @@ -2865,7 +2865,7 @@ vuint8m2_t test_vsll_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m4_mu( @@ -2874,7 +2874,7 @@ vuint8m2_t test_vsll_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsll_vv_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m4_mu( @@ -2883,7 +2883,7 @@ vuint8m4_t test_vsll_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m8_mu( @@ -2892,7 +2892,7 @@ vuint8m4_t test_vsll_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsll_vv_u8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u8m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m8_mu( @@ -2901,7 +2901,7 @@ vuint8m8_t test_vsll_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t 
vl) { - return vsll_vx_u8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u8m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_mu( @@ -2910,7 +2910,7 @@ vuint8m8_t test_vsll_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsll_vv_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_mu( @@ -2919,7 +2919,7 @@ vuint16mf4_t test_vsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_mu( @@ -2928,7 +2928,7 @@ vuint16mf4_t test_vsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsll_vv_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_mu( @@ -2937,7 +2937,7 @@ vuint16mf2_t test_vsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m1_mu( @@ -2946,7 +2946,7 @@ vuint16mf2_t 
test_vsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsll_vv_u16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m1_mu( @@ -2955,7 +2955,7 @@ vuint16m1_t test_vsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m2_mu( @@ -2964,7 +2964,7 @@ vuint16m1_t test_vsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsll_vv_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m2_mu( @@ -2973,7 +2973,7 @@ vuint16m2_t test_vsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m4_mu( @@ -2982,7 +2982,7 @@ vuint16m2_t test_vsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsll_vv_u16m4_mu(mask, maskedoff, 
op1, shift, vl); + return __riscv_vsll_vv_u16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m4_mu( @@ -2991,7 +2991,7 @@ vuint16m4_t test_vsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m8_mu( @@ -3000,7 +3000,7 @@ vuint16m4_t test_vsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsll_vv_u16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u16m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m8_mu( @@ -3009,7 +3009,7 @@ vuint16m8_t test_vsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u16m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_mu( @@ -3018,7 +3018,7 @@ vuint16m8_t test_vsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsll_vv_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_mu( @@ -3027,7 +3027,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32mf2_t test_vsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m1_mu( @@ -3036,7 +3036,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsll_vv_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m1_mu( @@ -3045,7 +3045,7 @@ vuint32m1_t test_vsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m2_mu( @@ -3054,7 +3054,7 @@ vuint32m1_t test_vsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsll_vv_u32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m2_mu( @@ -3063,7 +3063,7 @@ vuint32m2_t test_vsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m2_mu(mask, maskedoff, op1, shift, vl); } 
// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_mu( @@ -3072,7 +3072,7 @@ vuint32m2_t test_vsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsll_vv_u32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m4_mu( @@ -3081,7 +3081,7 @@ vuint32m4_t test_vsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m8_mu( @@ -3090,7 +3090,7 @@ vuint32m4_t test_vsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsll_vv_u32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u32m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m8_mu( @@ -3099,7 +3099,7 @@ vuint32m8_t test_vsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u32m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m1_mu( @@ -3108,7 +3108,7 @@ vuint32m8_t test_vsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t 
op1, vuint64m1_t shift, size_t vl) { - return vsll_vv_u64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m1_mu( @@ -3117,7 +3117,7 @@ vuint64m1_t test_vsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m2_mu( @@ -3126,7 +3126,7 @@ vuint64m1_t test_vsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsll_vv_u64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m2_mu( @@ -3135,7 +3135,7 @@ vuint64m2_t test_vsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m4_mu( @@ -3144,7 +3144,7 @@ vuint64m2_t test_vsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll_vv_u64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m4_mu( @@ -3153,7 +3153,7 @@ vuint64m4_t 
test_vsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m8_mu( @@ -3162,7 +3162,7 @@ vuint64m4_t test_vsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll_vv_u64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vv_u64m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_mu( @@ -3171,6 +3171,6 @@ vuint64m8_t test_vsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vsll_vx_u64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsll_vx_u64m8_mu(mask, maskedoff, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsmul.c index 9fd26e783712..57ffe733d36d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsmul.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsmul_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t 
maskedoff, vint8mf8_t op1, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsmul_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsmul_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t 
op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsmul_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsmul_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vsmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsmul_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsmul_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsmul_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vsmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsmul_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vsmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsmul_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsmul_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsmul_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsmul_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsmul_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsmul_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsmul_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2 // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsmul_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsmul_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m1_t test_vsmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsmul_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vsmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsmul_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_tum( @@ -408,7 +408,7 @@ vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsmul_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_tum( @@ -426,7 +426,7 @@ vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t 
maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsmul_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_tum( @@ -435,7 +435,7 @@ vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_tum( @@ -444,7 +444,7 @@ vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsmul_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_tum( @@ -453,7 +453,7 @@ vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_tum( @@ -462,7 +462,7 @@ vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsmul_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m1_tum(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_tum( @@ -480,7 +480,7 @@ vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsmul_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_tum( @@ -489,7 +489,7 @@ vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_tum( @@ -498,7 +498,7 @@ vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsmul_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_tum( @@ -507,7 +507,7 @@ vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - 
return vsmul_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_tum( @@ -516,7 +516,7 @@ vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsmul_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_tum( @@ -525,7 +525,7 @@ vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_tum( @@ -534,7 +534,7 @@ vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsmul_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_tum( @@ -543,7 +543,7 @@ vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_tum( @@ -552,7 +552,7 @@ vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsmul_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_tum( @@ -561,7 +561,7 @@ vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_tum( @@ -570,7 +570,7 @@ vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsmul_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_tum( @@ -579,7 +579,7 @@ vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_tum( @@ -588,7 +588,7 @@ vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsmul_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m2_tum(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_tum( @@ -597,7 +597,7 @@ vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_tum( @@ -606,7 +606,7 @@ vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsmul_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_tum( @@ -615,7 +615,7 @@ vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_tum( @@ -624,7 +624,7 @@ vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsmul_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_tum( @@ -633,7 +633,7 @@ vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, 
vint16m8_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tum( @@ -642,7 +642,7 @@ vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsmul_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tum( @@ -651,7 +651,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_tum( @@ -660,7 +660,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsmul_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_tum( @@ -669,7 +669,7 @@ vint32m1_t test_vsmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_tum( @@ -678,7 +678,7 @@ vint32m1_t 
test_vsmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsmul_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_tum( @@ -687,7 +687,7 @@ vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_tum( @@ -696,7 +696,7 @@ vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsmul_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_tum( @@ -705,7 +705,7 @@ vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_tum( @@ -714,7 +714,7 @@ vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsmul_vv_i32m8_tum(mask, maskedoff, op1, op2, 
vl); + return __riscv_vsmul_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_tum( @@ -723,7 +723,7 @@ vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_tum( @@ -732,7 +732,7 @@ vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsmul_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_tum( @@ -741,7 +741,7 @@ vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_tum( @@ -750,7 +750,7 @@ vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_tum( @@ -759,7 +759,7 @@ vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vsmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_tum( @@ -768,7 +768,7 @@ vint64m2_t test_vsmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_tum( @@ -777,7 +777,7 @@ vint64m4_t test_vsmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_tum( @@ -786,7 +786,7 @@ vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsmul_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_tum( @@ -795,7 +795,7 @@ vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsmul_vv_i8mf8_tumu( @@ -804,7 +804,7 @@ vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsmul_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_tumu( @@ -822,7 +822,7 @@ vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsmul_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_tumu( @@ -831,7 +831,7 @@ vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_tumu( @@ -840,7 +840,7 @@ vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, 
vint8mf2_t op2, size_t vl) { - return vsmul_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_tumu( @@ -849,7 +849,7 @@ vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_tumu( @@ -858,7 +858,7 @@ vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsmul_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_tumu( @@ -867,7 +867,7 @@ vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_tumu( @@ -876,7 +876,7 @@ vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsmul_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_tumu( @@ -885,7 +885,7 @@ vint8m2_t test_vsmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t 
maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_tumu( @@ -894,7 +894,7 @@ vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsmul_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_tumu( @@ -903,7 +903,7 @@ vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_tumu( @@ -912,7 +912,7 @@ vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsmul_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_tumu( @@ -921,7 +921,7 @@ vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_tumu( @@ -930,7 +930,7 @@ vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsmul_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_tumu( @@ -939,7 +939,7 @@ vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_tumu( @@ -948,7 +948,7 @@ vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsmul_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_tumu( @@ -957,7 +957,7 @@ vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_tumu( @@ -966,7 +966,7 @@ vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t 
mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsmul_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_tumu( @@ -975,7 +975,7 @@ vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_tumu( @@ -984,7 +984,7 @@ vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsmul_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_tumu( @@ -993,7 +993,7 @@ vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_tumu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsmul_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_tumu( @@ 
-1011,7 +1011,7 @@ vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_tumu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsmul_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_tumu( @@ -1029,7 +1029,7 @@ vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_tumu( @@ -1038,7 +1038,7 @@ vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsmul_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_tumu( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, 
size_t vl) { - return vsmul_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_tumu( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_tumu( @@ -1065,7 +1065,7 @@ vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_tumu( @@ -1074,7 +1074,7 @@ vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsmul_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_tumu( @@ -1083,7 +1083,7 @@ vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_tumu( @@ -1092,7 +1092,7 @@ vint32m2_t 
test_vsmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsmul_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_tumu( @@ -1101,7 +1101,7 @@ vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_tumu( @@ -1110,7 +1110,7 @@ vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsmul_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_tumu( @@ -1119,7 +1119,7 @@ vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_tumu( @@ -1128,7 +1128,7 @@ vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return 
vsmul_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_tumu( @@ -1137,7 +1137,7 @@ vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_tumu( @@ -1146,7 +1146,7 @@ vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_tumu( @@ -1155,7 +1155,7 @@ vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_tumu( @@ -1164,7 +1164,7 @@ vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_tumu( @@ -1173,7 +1173,7 @@ vint64m4_t test_vsmul_vv_i64m4_tumu(vbool16_t mask, 
vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_tumu( @@ -1182,7 +1182,7 @@ vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsmul_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_tumu( @@ -1191,7 +1191,7 @@ vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_mu( @@ -1200,7 +1200,7 @@ vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsmul_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vsmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsmul_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_mu( @@ -1218,7 +1218,7 @@ vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsmul_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_mu( @@ -1227,7 +1227,7 @@ vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_mu( @@ -1236,7 +1236,7 @@ vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsmul_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_mu( @@ -1245,7 +1245,7 @@ vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_mu( @@ -1254,7 +1254,7 @@ vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t 
mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsmul_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_mu( @@ -1263,7 +1263,7 @@ vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_mu( @@ -1272,7 +1272,7 @@ vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsmul_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_mu( @@ -1281,7 +1281,7 @@ vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_mu( @@ -1290,7 +1290,7 @@ vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsmul_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_mu( @@ -1299,7 +1299,7 @@ vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t 
maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_mu( @@ -1308,7 +1308,7 @@ vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsmul_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_mu( @@ -1317,7 +1317,7 @@ vint8m8_t test_vsmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsmul_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_mu( @@ -1326,7 +1326,7 @@ vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsmul_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_mu( @@ -1335,7 +1335,7 @@ vint16mf4_t test_vsmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf4_mu(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_mu( @@ -1344,7 +1344,7 @@ vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsmul_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_mu( @@ -1353,7 +1353,7 @@ vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_mu( @@ -1362,7 +1362,7 @@ vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsmul_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_mu( @@ -1371,7 +1371,7 @@ vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_mu( @@ -1380,7 +1380,7 @@ vint16m1_t test_vsmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, 
vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsmul_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_mu( @@ -1389,7 +1389,7 @@ vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_mu( @@ -1398,7 +1398,7 @@ vint16m2_t test_vsmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsmul_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_mu( @@ -1407,7 +1407,7 @@ vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_mu( @@ -1416,7 +1416,7 @@ vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsmul_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_mu( @@ -1425,7 +1425,7 @@ vint16m8_t test_vsmul_vv_i16m8_mu(vbool2_t mask, 
vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsmul_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_mu( @@ -1434,7 +1434,7 @@ vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsmul_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_mu( @@ -1443,7 +1443,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_mu( @@ -1452,7 +1452,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsmul_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_mu( @@ -1461,7 +1461,7 @@ vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsmul_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_mu( @@ -1470,7 +1470,7 @@ vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsmul_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_mu( @@ -1479,7 +1479,7 @@ vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_mu( @@ -1488,7 +1488,7 @@ vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsmul_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_mu( @@ -1497,7 +1497,7 @@ vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_mu( @@ -1506,7 +1506,7 @@ vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t 
mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsmul_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_mu( @@ -1515,7 +1515,7 @@ vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsmul_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_mu( @@ -1524,7 +1524,7 @@ vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsmul_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_mu( @@ -1533,7 +1533,7 @@ vint64m1_t test_vsmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_mu( @@ -1542,7 +1542,7 @@ vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_mu( @@ -1551,7 +1551,7 @@ vint64m2_t 
test_vsmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_mu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_mu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_mu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsmul_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsmul_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_mu( @@ -1587,6 +1587,6 @@ vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsmul_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsra.c index e22521486c6e..7aef6c426029 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsra.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsra_vv_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vsra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vsra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsra_vv_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vsra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_tu( @@ -48,7 +48,7 @@ 
vint8mf4_t test_vsra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsra_vv_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vsra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vsra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsra_vv_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vsra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vsra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsra_vv_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t 
test_vsra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vsra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsra_vv_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vsra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vsra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsra_vv_i8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vsra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vsra_vx_i8m8_tu(vint8m8_t 
maskedoff, vint8m8_t op1, size_t shift, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsra_vv_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vsra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vsra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsra_vv_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vsra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vsra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsra_vv_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m1_tu( @@ -183,7 +183,7 @@ 
vint16m1_t test_vsra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vsra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsra_vv_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vsra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vsra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsra_vv_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vsra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m8_tu( @@ -228,7 +228,7 @@ 
vint16m4_t test_vsra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsra_vv_i16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vsra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vsra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsra_vv_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vsra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsra_vv_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m1_tu( 
@@ -273,7 +273,7 @@ vint32m1_t test_vsra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vsra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsra_vv_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vsra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vsra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsra_vv_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vsra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m8_tu( 
@@ -318,7 +318,7 @@ vint32m4_t test_vsra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsra_vv_i32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vsra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vsra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsra_vv_i64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vsra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vsra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsra_vv_i64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsra_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vsra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vsra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsra_vv_i64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vsra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vsra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsra_vv_i64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vsra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsra_vv_i8mf8_tum( @@ -408,7 +408,7 @@ vint64m8_t test_vsra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsra_vv_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vsra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_tum( @@ -426,7 +426,7 @@ vint8mf8_t test_vsra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsra_vv_i8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_tum( @@ -435,7 +435,7 @@ vint8mf4_t test_vsra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_tum( @@ -444,7 +444,7 @@ vint8mf4_t test_vsra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, 
size_t vl) { - return vsra_vv_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_tum( @@ -453,7 +453,7 @@ vint8mf2_t test_vsra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m1_tum( @@ -462,7 +462,7 @@ vint8mf2_t test_vsra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsra_vv_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vsra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m2_tum( @@ -480,7 +480,7 @@ vint8m1_t test_vsra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsra_vv_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m2_tum( @@ -489,7 +489,7 @@ vint8m2_t test_vsra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m4_tum( @@ -498,7 +498,7 @@ vint8m2_t test_vsra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsra_vv_i8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m4_tum( @@ -507,7 +507,7 @@ vint8m4_t test_vsra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m8_tum( @@ -516,7 +516,7 @@ vint8m4_t test_vsra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsra_vv_i8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m8_tum( @@ -525,7 +525,7 @@ vint8m8_t test_vsra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsra_vv_i16mf4_tum( @@ -534,7 +534,7 @@ vint8m8_t test_vsra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsra_vv_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_tum( @@ -543,7 +543,7 @@ vint16mf4_t test_vsra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_tum( @@ -552,7 +552,7 @@ vint16mf4_t test_vsra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsra_vv_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_tum( @@ -561,7 +561,7 @@ vint16mf2_t test_vsra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m1_tum( @@ -570,7 +570,7 @@ vint16mf2_t test_vsra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, 
vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsra_vv_i16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m1_tum( @@ -579,7 +579,7 @@ vint16m1_t test_vsra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m2_tum( @@ -588,7 +588,7 @@ vint16m1_t test_vsra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsra_vv_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m2_tum( @@ -597,7 +597,7 @@ vint16m2_t test_vsra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m4_tum( @@ -606,7 +606,7 @@ vint16m2_t test_vsra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsra_vv_i16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m4_tum( @@ -615,7 +615,7 @@ vint16m4_t 
test_vsra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m8_tum( @@ -624,7 +624,7 @@ vint16m4_t test_vsra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsra_vv_i16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m8_tum( @@ -633,7 +633,7 @@ vint16m8_t test_vsra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tum( @@ -642,7 +642,7 @@ vint16m8_t test_vsra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsra_vv_i32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tum( @@ -651,7 +651,7 @@ vint32mf2_t test_vsra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i32mf2_tum(mask, 
maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m1_tum( @@ -660,7 +660,7 @@ vint32mf2_t test_vsra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsra_vv_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m1_tum( @@ -669,7 +669,7 @@ vint32m1_t test_vsra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m2_tum( @@ -678,7 +678,7 @@ vint32m1_t test_vsra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsra_vv_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m2_tum( @@ -687,7 +687,7 @@ vint32m2_t test_vsra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m4_tum( @@ -696,7 +696,7 @@ vint32m2_t test_vsra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsra_vv_i32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m4_tum( @@ -705,7 +705,7 @@ vint32m4_t test_vsra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m8_tum( @@ -714,7 +714,7 @@ vint32m4_t test_vsra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsra_vv_i32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m8_tum( @@ -723,7 +723,7 @@ vint32m8_t test_vsra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m1_tum( @@ -732,7 +732,7 @@ vint32m8_t test_vsra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsra_vv_i64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m1_tum(mask, maskedoff, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vsra_vx_i64m1_tum( @@ -741,7 +741,7 @@ vint64m1_t test_vsra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m2_tum( @@ -750,7 +750,7 @@ vint64m1_t test_vsra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsra_vv_i64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m2_tum( @@ -759,7 +759,7 @@ vint64m2_t test_vsra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m4_tum( @@ -768,7 +768,7 @@ vint64m2_t test_vsra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsra_vv_i64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m4_tum( @@ -777,7 +777,7 @@ vint64m4_t test_vsra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, 
size_t shift, size_t vl) { - return vsra_vx_i64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m8_tum( @@ -786,7 +786,7 @@ vint64m4_t test_vsra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsra_vv_i64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m8_tum( @@ -795,7 +795,7 @@ vint64m8_t test_vsra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_tumu( @@ -804,7 +804,7 @@ vint64m8_t test_vsra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsra_vv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vsra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_tumu( @@ -822,7 +822,7 @@ vint8mf8_t 
test_vsra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsra_vv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_tumu( @@ -831,7 +831,7 @@ vint8mf4_t test_vsra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_tumu( @@ -840,7 +840,7 @@ vint8mf4_t test_vsra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsra_vv_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_tumu( @@ -849,7 +849,7 @@ vint8mf2_t test_vsra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m1_tumu( @@ -858,7 +858,7 @@ vint8mf2_t test_vsra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsra_vv_i8m1_tumu(mask, 
maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m1_tumu( @@ -867,7 +867,7 @@ vint8m1_t test_vsra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m2_tumu( @@ -876,7 +876,7 @@ vint8m1_t test_vsra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsra_vv_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m2_tumu( @@ -885,7 +885,7 @@ vint8m2_t test_vsra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m4_tumu( @@ -894,7 +894,7 @@ vint8m2_t test_vsra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsra_vv_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m4_tumu( @@ -903,7 +903,7 @@ vint8m4_t test_vsra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m4_t test_vsra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m8_tumu( @@ -912,7 +912,7 @@ vint8m4_t test_vsra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsra_vv_i8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m8_tumu( @@ -921,7 +921,7 @@ vint8m8_t test_vsra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_tumu( @@ -930,7 +930,7 @@ vint8m8_t test_vsra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsra_vv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_tumu( @@ -939,7 +939,7 @@ vint16mf4_t test_vsra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_tumu( @@ -948,7 +948,7 @@ vint16mf4_t test_vsra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsra_vv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_tumu( @@ -957,7 +957,7 @@ vint16mf2_t test_vsra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m1_tumu( @@ -966,7 +966,7 @@ vint16mf2_t test_vsra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsra_vv_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m1_tumu( @@ -975,7 +975,7 @@ vint16m1_t test_vsra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m2_tumu( @@ -984,7 +984,7 @@ vint16m1_t test_vsra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2_tumu(vbool8_t mask, 
vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsra_vv_i16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m2_tumu( @@ -993,7 +993,7 @@ vint16m2_t test_vsra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m4_tumu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vsra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsra_vv_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m4_tumu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vsra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m8_tumu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vsra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsra_vv_i16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m8_tumu( 
@@ -1029,7 +1029,7 @@ vint16m8_t test_vsra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_tumu( @@ -1038,7 +1038,7 @@ vint16m8_t test_vsra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsra_vv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_tumu( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vsra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m1_tumu( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsra_vv_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m1_tumu( @@ -1065,7 +1065,7 @@ vint32m1_t test_vsra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, 
size_t shift, size_t vl) { - return vsra_vx_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m2_tumu( @@ -1074,7 +1074,7 @@ vint32m1_t test_vsra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsra_vv_i32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m2_tumu( @@ -1083,7 +1083,7 @@ vint32m2_t test_vsra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m4_tumu( @@ -1092,7 +1092,7 @@ vint32m2_t test_vsra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsra_vv_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m4_tumu( @@ -1101,7 +1101,7 @@ vint32m4_t test_vsra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m8_tumu( @@ -1110,7 +1110,7 @@ vint32m4_t 
test_vsra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsra_vv_i32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m8_tumu( @@ -1119,7 +1119,7 @@ vint32m8_t test_vsra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m1_tumu( @@ -1128,7 +1128,7 @@ vint32m8_t test_vsra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsra_vv_i64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m1_tumu( @@ -1137,7 +1137,7 @@ vint64m1_t test_vsra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m2_tumu( @@ -1146,7 +1146,7 @@ vint64m1_t test_vsra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return 
vsra_vv_i64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m2_tumu( @@ -1155,7 +1155,7 @@ vint64m2_t test_vsra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m4_tumu( @@ -1164,7 +1164,7 @@ vint64m2_t test_vsra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsra_vv_i64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m4_tumu( @@ -1173,7 +1173,7 @@ vint64m4_t test_vsra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m8_tumu( @@ -1182,7 +1182,7 @@ vint64m4_t test_vsra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsra_vv_i64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m8_tumu( @@ -1191,7 +1191,7 @@ vint64m8_t test_vsra_vv_i64m8_tumu(vbool8_t mask, 
vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_mu( @@ -1200,7 +1200,7 @@ vint64m8_t test_vsra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsra_vv_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vsra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_mu( @@ -1218,7 +1218,7 @@ vint8mf8_t test_vsra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsra_vv_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_mu( @@ -1227,7 +1227,7 @@ vint8mf4_t test_vsra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vsra_vx_i8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_mu( @@ -1236,7 +1236,7 @@ vint8mf4_t test_vsra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsra_vv_i8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_mu( @@ -1245,7 +1245,7 @@ vint8mf2_t test_vsra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m1_mu( @@ -1254,7 +1254,7 @@ vint8mf2_t test_vsra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsra_vv_i8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m1_mu( @@ -1263,7 +1263,7 @@ vint8m1_t test_vsra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m2_mu( @@ -1272,7 +1272,7 @@ vint8m1_t test_vsra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2_mu(vbool4_t mask, 
vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsra_vv_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m2_mu( @@ -1281,7 +1281,7 @@ vint8m2_t test_vsra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m4_mu( @@ -1290,7 +1290,7 @@ vint8m2_t test_vsra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsra_vv_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m4_mu( @@ -1299,7 +1299,7 @@ vint8m4_t test_vsra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8m8_mu( @@ -1308,7 +1308,7 @@ vint8m4_t test_vsra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsra_vv_i8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i8m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8m8_mu( @@ -1317,7 +1317,7 @@ vint8m8_t test_vsra_vv_i8m8_mu(vbool1_t mask, 
vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i8m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_mu( @@ -1326,7 +1326,7 @@ vint8m8_t test_vsra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsra_vv_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_mu( @@ -1335,7 +1335,7 @@ vint16mf4_t test_vsra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_mu( @@ -1344,7 +1344,7 @@ vint16mf4_t test_vsra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsra_vv_i16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_mu( @@ -1353,7 +1353,7 @@ vint16mf2_t test_vsra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16mf2_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vsra_vx_i16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m1_mu( @@ -1362,7 +1362,7 @@ vint16mf2_t test_vsra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsra_vv_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m1_mu( @@ -1371,7 +1371,7 @@ vint16m1_t test_vsra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m2_mu( @@ -1380,7 +1380,7 @@ vint16m1_t test_vsra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsra_vv_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m2_mu( @@ -1389,7 +1389,7 @@ vint16m2_t test_vsra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m4_mu( @@ -1398,7 +1398,7 @@ vint16m2_t test_vsra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vsra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsra_vv_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m4_mu( @@ -1407,7 +1407,7 @@ vint16m4_t test_vsra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i16m8_mu( @@ -1416,7 +1416,7 @@ vint16m4_t test_vsra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsra_vv_i16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i16m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i16m8_mu( @@ -1425,7 +1425,7 @@ vint16m8_t test_vsra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i16m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_mu( @@ -1434,7 +1434,7 @@ vint16m8_t test_vsra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsra_vv_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsra_vx_i32mf2_mu( @@ -1443,7 +1443,7 @@ vint32mf2_t test_vsra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vsra_vx_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m1_mu( @@ -1452,7 +1452,7 @@ vint32mf2_t test_vsra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsra_vv_i32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m1_mu( @@ -1461,7 +1461,7 @@ vint32m1_t test_vsra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m2_mu( @@ -1470,7 +1470,7 @@ vint32m1_t test_vsra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsra_vv_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m2_mu( @@ -1479,7 +1479,7 @@ vint32m2_t test_vsra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t 
vl) { - return vsra_vx_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m4_mu( @@ -1488,7 +1488,7 @@ vint32m2_t test_vsra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsra_vv_i32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m4_mu( @@ -1497,7 +1497,7 @@ vint32m4_t test_vsra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i32m8_mu( @@ -1506,7 +1506,7 @@ vint32m4_t test_vsra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsra_vv_i32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i32m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i32m8_mu( @@ -1515,7 +1515,7 @@ vint32m8_t test_vsra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i32m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m1_mu( @@ -1524,7 +1524,7 @@ vint32m8_t test_vsra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, 
vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsra_vv_i64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m1_mu( @@ -1533,7 +1533,7 @@ vint64m1_t test_vsra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m2_mu( @@ -1542,7 +1542,7 @@ vint64m1_t test_vsra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsra_vv_i64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m2_mu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vsra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m4_mu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vsra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsra_vv_i64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m4_mu(mask, maskedoff, 
op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m4_mu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vsra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i64m8_mu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vsra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsra_vv_i64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vv_i64m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i64m8_mu( @@ -1587,6 +1587,6 @@ vint64m8_t test_vsra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vsra_vx_i64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsra_vx_i64m8_mu(mask, maskedoff, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsrl.c index 43d6a1da97fd..8c3b9d121865 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsrl.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsrl_vv_u8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8mf8_tu(maskedoff, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_tu( @@ -30,7 +30,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsrl_vv_u8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_tu( @@ -39,7 +39,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_tu( @@ -48,7 +48,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsrl_vv_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_tu( @@ -57,7 +57,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8mf2_tu(maskedoff, op1, shift, vl); 
} // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_tu( @@ -66,7 +66,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsrl_vv_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vsrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_tu( @@ -84,7 +84,7 @@ vuint8m1_t test_vsrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsrl_vv_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_tu( @@ -93,7 +93,7 @@ vuint8m2_t test_vsrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_tu( @@ -102,7 +102,7 @@ vuint8m2_t test_vsrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsrl_vv_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsrl_vx_u8m4_tu( @@ -111,7 +111,7 @@ vuint8m4_t test_vsrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vsrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsrl_vv_u8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_tu( @@ -129,7 +129,7 @@ vuint8m8_t test_vsrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_tu( @@ -138,7 +138,7 @@ vuint8m8_t test_vsrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shi // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsrl_vv_u16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_tu( @@ -147,7 +147,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16mf4_tu(maskedoff, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_tu( @@ -156,7 +156,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsrl_vv_u16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_tu( @@ -165,7 +165,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_tu( @@ -174,7 +174,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsrl_vv_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_tu( @@ -183,7 +183,7 @@ vuint16m1_t test_vsrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_tu( @@ -192,7 +192,7 @@ vuint16m1_t test_vsrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsrl_vv_u16m2_tu(maskedoff, op1, shift, vl); + return 
__riscv_vsrl_vv_u16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_tu( @@ -201,7 +201,7 @@ vuint16m2_t test_vsrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_tu( @@ -210,7 +210,7 @@ vuint16m2_t test_vsrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsrl_vv_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_tu( @@ -219,7 +219,7 @@ vuint16m4_t test_vsrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_tu( @@ -228,7 +228,7 @@ vuint16m4_t test_vsrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsrl_vv_u16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_tu( @@ -237,7 +237,7 @@ vuint16m8_t test_vsrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m8_tu(maskedoff, op1, 
shift, vl); + return __riscv_vsrl_vx_u16m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tu( @@ -246,7 +246,7 @@ vuint16m8_t test_vsrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsrl_vv_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tu( @@ -255,7 +255,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_tu( @@ -264,7 +264,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsrl_vv_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_tu( @@ -273,7 +273,7 @@ vuint32m1_t test_vsrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_tu( @@ -282,7 +282,7 @@ vuint32m1_t test_vsrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - 
return vsrl_vv_u32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_tu( @@ -291,7 +291,7 @@ vuint32m2_t test_vsrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_tu( @@ -300,7 +300,7 @@ vuint32m2_t test_vsrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsrl_vv_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_tu( @@ -309,7 +309,7 @@ vuint32m4_t test_vsrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_tu( @@ -318,7 +318,7 @@ vuint32m4_t test_vsrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsrl_vv_u32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_tu( @@ -327,7 +327,7 @@ vuint32m8_t test_vsrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, 
size_t vl) { - return vsrl_vx_u32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_tu( @@ -336,7 +336,7 @@ vuint32m8_t test_vsrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsrl_vv_u64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_tu( @@ -345,7 +345,7 @@ vuint64m1_t test_vsrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_tu( @@ -354,7 +354,7 @@ vuint64m1_t test_vsrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsrl_vv_u64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_tu( @@ -363,7 +363,7 @@ vuint64m2_t test_vsrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_tu( @@ -372,7 +372,7 @@ vuint64m2_t test_vsrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t 
op1, vuint64m4_t shift, size_t vl) { - return vsrl_vv_u64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_tu( @@ -381,7 +381,7 @@ vuint64m4_t test_vsrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_tu( @@ -390,7 +390,7 @@ vuint64m4_t test_vsrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsrl_vv_u64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m8_t test_vsrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t test_vsrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsrl_vv_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_tum( @@ -417,7 +417,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vsrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_tum( @@ -426,7 +426,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsrl_vv_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_tum( @@ -435,7 +435,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_tum( @@ -444,7 +444,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsrl_vv_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_tum( @@ -453,7 +453,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8mf2_tum(mask, maskedoff, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_tum( @@ -462,7 +462,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsrl_vv_u8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_tum( @@ -471,7 +471,7 @@ vuint8m1_t test_vsrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_tum( @@ -480,7 +480,7 @@ vuint8m1_t test_vsrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsrl_vv_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_tum( @@ -489,7 +489,7 @@ vuint8m2_t test_vsrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_tum( @@ -498,7 +498,7 @@ vuint8m2_t test_vsrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t 
vl) { - return vsrl_vv_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_tum( @@ -507,7 +507,7 @@ vuint8m4_t test_vsrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_tum( @@ -516,7 +516,7 @@ vuint8m4_t test_vsrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsrl_vv_u8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_tum( @@ -525,7 +525,7 @@ vuint8m8_t test_vsrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_tum( @@ -534,7 +534,7 @@ vuint8m8_t test_vsrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsrl_vv_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_tum( @@ -543,7 +543,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t 
maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_tum( @@ -552,7 +552,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsrl_vv_u16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_tum( @@ -561,7 +561,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_tum( @@ -570,7 +570,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsrl_vv_u16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_tum( @@ -579,7 +579,7 @@ vuint16m1_t test_vsrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m1_tum(mask, maskedoff, op1, shift, vl); + 
return __riscv_vsrl_vx_u16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_tum( @@ -588,7 +588,7 @@ vuint16m1_t test_vsrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsrl_vv_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_tum( @@ -597,7 +597,7 @@ vuint16m2_t test_vsrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_tum( @@ -606,7 +606,7 @@ vuint16m2_t test_vsrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsrl_vv_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_tum( @@ -615,7 +615,7 @@ vuint16m4_t test_vsrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_tum( @@ -624,7 +624,7 @@ vuint16m4_t test_vsrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m8_t test_vsrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsrl_vv_u16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_tum( @@ -633,7 +633,7 @@ vuint16m8_t test_vsrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tum( @@ -642,7 +642,7 @@ vuint16m8_t test_vsrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsrl_vv_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tum( @@ -651,7 +651,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_tum( @@ -660,7 +660,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsrl_vv_u32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m1_tum(mask, maskedoff, op1, 
shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_tum( @@ -669,7 +669,7 @@ vuint32m1_t test_vsrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_tum( @@ -678,7 +678,7 @@ vuint32m1_t test_vsrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsrl_vv_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_tum( @@ -687,7 +687,7 @@ vuint32m2_t test_vsrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_tum( @@ -696,7 +696,7 @@ vuint32m2_t test_vsrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsrl_vv_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_tum( @@ -705,7 +705,7 @@ vuint32m4_t test_vsrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t 
maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_tum( @@ -714,7 +714,7 @@ vuint32m4_t test_vsrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsrl_vv_u32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_tum( @@ -723,7 +723,7 @@ vuint32m8_t test_vsrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_tum( @@ -732,7 +732,7 @@ vuint32m8_t test_vsrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsrl_vv_u64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_tum( @@ -741,7 +741,7 @@ vuint64m1_t test_vsrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_tum( @@ -750,7 +750,7 @@ 
vuint64m1_t test_vsrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsrl_vv_u64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_tum( @@ -759,7 +759,7 @@ vuint64m2_t test_vsrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_tum( @@ -768,7 +768,7 @@ vuint64m2_t test_vsrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsrl_vv_u64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_tum( @@ -777,7 +777,7 @@ vuint64m4_t test_vsrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_tum( @@ -786,7 +786,7 @@ vuint64m4_t test_vsrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return 
vsrl_vv_u64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m8_t test_vsrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vsrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsrl_vv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_tumu( @@ -813,7 +813,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_tumu( @@ -822,7 +822,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsrl_vv_u8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_tumu( @@ -831,7 +831,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4_tumu(vbool32_t mask, 
vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_tumu( @@ -840,7 +840,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsrl_vv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_tumu( @@ -849,7 +849,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_tumu( @@ -858,7 +858,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsrl_vv_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_tumu( @@ -867,7 +867,7 @@ vuint8m1_t test_vsrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vsrl_vx_u8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_tumu( @@ -876,7 +876,7 @@ vuint8m1_t test_vsrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsrl_vv_u8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_tumu( @@ -885,7 +885,7 @@ vuint8m2_t test_vsrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_tumu( @@ -894,7 +894,7 @@ vuint8m2_t test_vsrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsrl_vv_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_tumu( @@ -903,7 +903,7 @@ vuint8m4_t test_vsrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_tumu( @@ -912,7 +912,7 @@ vuint8m4_t test_vsrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vsrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsrl_vv_u8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_tumu( @@ -921,7 +921,7 @@ vuint8m8_t test_vsrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_tumu( @@ -930,7 +930,7 @@ vuint8m8_t test_vsrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsrl_vv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_tumu( @@ -939,7 +939,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_tumu( @@ -948,7 +948,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsrl_vv_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16mf2_tumu(mask, maskedoff, op1, 
shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_tumu( @@ -957,7 +957,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_tumu( @@ -966,7 +966,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsrl_vv_u16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_tumu( @@ -975,7 +975,7 @@ vuint16m1_t test_vsrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_tumu( @@ -984,7 +984,7 @@ vuint16m1_t test_vsrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsrl_vv_u16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_tumu( @@ -993,7 +993,7 @@ vuint16m2_t test_vsrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vsrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_tumu( @@ -1002,7 +1002,7 @@ vuint16m2_t test_vsrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsrl_vv_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_tumu( @@ -1011,7 +1011,7 @@ vuint16m4_t test_vsrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_tumu( @@ -1020,7 +1020,7 @@ vuint16m4_t test_vsrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsrl_vv_u16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_tumu( @@ -1029,7 +1029,7 @@ vuint16m8_t test_vsrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m8_tumu(mask, maskedoff, op1, shift, vl); 
} // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_tumu( @@ -1038,7 +1038,7 @@ vuint16m8_t test_vsrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsrl_vv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_tumu( @@ -1047,7 +1047,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_tumu( @@ -1056,7 +1056,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsrl_vv_u32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_tumu( @@ -1065,7 +1065,7 @@ vuint32m1_t test_vsrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_tumu( @@ -1074,7 +1074,7 @@ vuint32m1_t test_vsrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vsrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsrl_vv_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_tumu( @@ -1083,7 +1083,7 @@ vuint32m2_t test_vsrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_tumu( @@ -1092,7 +1092,7 @@ vuint32m2_t test_vsrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsrl_vv_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_tumu( @@ -1101,7 +1101,7 @@ vuint32m4_t test_vsrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_tumu( @@ -1110,7 +1110,7 @@ vuint32m4_t test_vsrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsrl_vv_u32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m8_tumu(mask, maskedoff, op1, 
shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_tumu( @@ -1119,7 +1119,7 @@ vuint32m8_t test_vsrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_tumu( @@ -1128,7 +1128,7 @@ vuint32m8_t test_vsrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsrl_vv_u64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_tumu( @@ -1137,7 +1137,7 @@ vuint64m1_t test_vsrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_tumu( @@ -1146,7 +1146,7 @@ vuint64m1_t test_vsrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsrl_vv_u64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_tumu( @@ -1155,7 +1155,7 @@ vuint64m2_t test_vsrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vsrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_tumu( @@ -1164,7 +1164,7 @@ vuint64m2_t test_vsrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsrl_vv_u64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_tumu( @@ -1173,7 +1173,7 @@ vuint64m4_t test_vsrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_tumu( @@ -1182,7 +1182,7 @@ vuint64m4_t test_vsrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsrl_vv_u64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m8_t test_vsrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m8_tumu(mask, maskedoff, op1, shift, 
vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t test_vsrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vsrl_vv_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_mu( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_mu( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsrl_vv_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_mu( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_mu( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, 
vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsrl_vv_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_mu( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_mu( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsrl_vv_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_mu( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vsrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_mu( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vsrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsrl_vv_u8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_mu( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vsrl_vv_u8m2_mu(vbool4_t mask, 
vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_mu( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vsrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsrl_vv_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_mu( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vsrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_mu( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vsrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsrl_vv_u8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u8m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_mu( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vsrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u8m8_mu(mask, maskedoff, op1, shift, 
vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_mu( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vsrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsrl_vv_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_mu( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_mu( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsrl_vv_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_mu( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_mu( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1_mu(vbool16_t 
mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsrl_vv_u16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_mu( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vsrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_mu( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vsrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsrl_vv_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_mu( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vsrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_mu( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vsrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsrl_vv_u16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_mu( @@ -1407,7 
+1407,7 @@ vuint16m4_t test_vsrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_mu( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vsrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsrl_vv_u16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u16m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_mu( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vsrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u16m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_mu( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vsrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsrl_vv_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_mu( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return 
vsrl_vx_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_mu( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsrl_vv_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_mu( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vsrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_mu( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vsrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsrl_vv_u32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_mu( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vsrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_mu( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vsrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, 
vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsrl_vv_u32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_mu( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vsrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_mu( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vsrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsrl_vv_u32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u32m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_mu( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vsrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u32m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_mu( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vsrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsrl_vv_u64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m1_mu(mask, 
maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_mu( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vsrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_mu( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vsrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsrl_vv_u64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_mu( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vsrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_mu( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vsrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsrl_vv_u64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_mu( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vsrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4_mu(vbool16_t mask, 
vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_mu( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vsrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsrl_vv_u64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vv_u64m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vsrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vsrl_vx_u64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vsrl_vx_u64m8_mu(mask, maskedoff, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssra.c index e36cbb0e67b6..e5d9db55e350 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssra.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssra_vv_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, 
size_t shift, size_t vl) { - return vssra_vx_i8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssra_vv_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssra_vv_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t 
op1, vuint8m1_t shift, size_t vl) { - return vssra_vv_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vssra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssra_vv_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssra_vv_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vssra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, 
size_t vl) { - return vssra_vx_i8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vssra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssra_vv_i8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssra_vv_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vssra_vx_i16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t maskedoff, 
vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssra_vv_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssra_vv_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssra_vv_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vssra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vssra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssra_vv_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssra_vv_i16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t s // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssra_vv_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssra_vv_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssra_vv_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t maskedoff, 
vint32m2_t op1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssra_vv_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssra_vv_i32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vssra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t 
test_vssra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssra_vv_i64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssra_vv_i64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vssra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssra_vv_i64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m4_tu( @@ 
-381,7 +381,7 @@ vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssra_vv_i64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_tum( @@ -408,7 +408,7 @@ vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssra_vv_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_tum( @@ -417,7 +417,7 @@ vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf8_tum(mask, maskedoff, op1, shift, vl); + return 
__riscv_vssra_vx_i8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_tum( @@ -426,7 +426,7 @@ vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssra_vv_i8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_tum( @@ -435,7 +435,7 @@ vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_tum( @@ -444,7 +444,7 @@ vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssra_vv_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_tum( @@ -453,7 +453,7 @@ vint8mf2_t test_vssra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_tum( @@ -462,7 +462,7 @@ vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssra_vv_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_tum( @@ -480,7 +480,7 @@ vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssra_vv_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m2_tum( @@ -489,7 +489,7 @@ vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_tum( @@ -498,7 +498,7 @@ vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssra_vv_i8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vssra_vx_i8m4_tum( @@ -507,7 +507,7 @@ vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_tum( @@ -516,7 +516,7 @@ vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssra_vv_i8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_tum( @@ -525,7 +525,7 @@ vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_tum( @@ -534,7 +534,7 @@ vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssra_vv_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_tum( @@ -543,7 +543,7 @@ vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, 
size_t vl) { - return vssra_vx_i16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_tum( @@ -552,7 +552,7 @@ vint16mf4_t test_vssra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssra_vv_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_tum( @@ -561,7 +561,7 @@ vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_tum( @@ -570,7 +570,7 @@ vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssra_vv_i16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_tum( @@ -579,7 +579,7 @@ vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_tum( @@ -588,7 +588,7 @@ vint16m1_t 
test_vssra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssra_vv_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_tum( @@ -597,7 +597,7 @@ vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_tum( @@ -606,7 +606,7 @@ vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssra_vv_i16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_tum( @@ -615,7 +615,7 @@ vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_tum( @@ -624,7 +624,7 @@ vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return 
vssra_vv_i16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_tum( @@ -633,7 +633,7 @@ vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tum( @@ -642,7 +642,7 @@ vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssra_vv_i32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tum( @@ -651,7 +651,7 @@ vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_tum( @@ -660,7 +660,7 @@ vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssra_vv_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_tum( @@ -669,7 +669,7 @@ vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t 
mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_tum( @@ -678,7 +678,7 @@ vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssra_vv_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_tum( @@ -687,7 +687,7 @@ vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_tum( @@ -696,7 +696,7 @@ vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssra_vv_i32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m4_tum( @@ -705,7 +705,7 @@ vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m4_tum(mask, maskedoff, op1, shift, vl); + 
return __riscv_vssra_vx_i32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_tum( @@ -714,7 +714,7 @@ vint32m4_t test_vssra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssra_vv_i32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_tum( @@ -723,7 +723,7 @@ vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_tum( @@ -732,7 +732,7 @@ vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssra_vv_i64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_tum( @@ -741,7 +741,7 @@ vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_tum( @@ -750,7 +750,7 @@ vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssra_vv_i64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_tum( @@ -759,7 +759,7 @@ vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m4_tum( @@ -768,7 +768,7 @@ vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssra_vv_i64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m4_tum( @@ -777,7 +777,7 @@ vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m8_tum( @@ -786,7 +786,7 @@ vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssra_vv_i64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m8_tum(mask, maskedoff, op1, shift, vl); } 
// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_tum( @@ -795,7 +795,7 @@ vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_tumu( @@ -804,7 +804,7 @@ vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssra_vv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_tumu( @@ -822,7 +822,7 @@ vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssra_vv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_tumu( @@ -831,7 +831,7 @@ vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t mask, 
vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_tumu( @@ -840,7 +840,7 @@ vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssra_vv_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_tumu( @@ -849,7 +849,7 @@ vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_tumu( @@ -858,7 +858,7 @@ vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssra_vv_i8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m1_tumu( @@ -867,7 +867,7 @@ vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_tumu( @@ 
-876,7 +876,7 @@ vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssra_vv_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m2_tumu( @@ -885,7 +885,7 @@ vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_tumu( @@ -894,7 +894,7 @@ vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssra_vv_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m4_tumu( @@ -903,7 +903,7 @@ vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_tumu( @@ -912,7 +912,7 @@ vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return 
vssra_vv_i8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_tumu( @@ -921,7 +921,7 @@ vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_tumu( @@ -930,7 +930,7 @@ vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssra_vv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_tumu( @@ -939,7 +939,7 @@ vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vssra_vx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_tumu( @@ -948,7 +948,7 @@ vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssra_vv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_tumu( @@ -957,7 +957,7 @@ vint16mf2_t 
test_vssra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_tumu( @@ -966,7 +966,7 @@ vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssra_vv_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_tumu( @@ -975,7 +975,7 @@ vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_tumu( @@ -984,7 +984,7 @@ vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssra_vv_i16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_tumu( @@ -993,7 +993,7 @@ vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return 
vssra_vx_i16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_tumu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssra_vv_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_tumu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_tumu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssra_vv_i16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_tumu( @@ -1029,7 +1029,7 @@ vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_tumu( @@ -1038,7 +1038,7 @@ vint16m8_t 
test_vssra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssra_vv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_tumu( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_tumu( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssra_vv_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_tumu( @@ -1065,7 +1065,7 @@ vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_tumu( @@ -1074,7 +1074,7 @@ vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, 
size_t vl) { - return vssra_vv_i32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_tumu( @@ -1083,7 +1083,7 @@ vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_tumu( @@ -1092,7 +1092,7 @@ vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssra_vv_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m4_tumu( @@ -1101,7 +1101,7 @@ vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_tumu( @@ -1110,7 +1110,7 @@ vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssra_vv_i32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_tumu( @@ -1119,7 +1119,7 @@ vint32m8_t 
test_vssra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_tumu( @@ -1128,7 +1128,7 @@ vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssra_vv_i64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_tumu( @@ -1137,7 +1137,7 @@ vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_tumu( @@ -1146,7 +1146,7 @@ vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssra_vv_i64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_tumu( @@ -1155,7 +1155,7 @@ vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - 
return vssra_vx_i64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m4_tumu( @@ -1164,7 +1164,7 @@ vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssra_vv_i64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m4_tumu( @@ -1173,7 +1173,7 @@ vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m8_tumu( @@ -1182,7 +1182,7 @@ vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssra_vv_i64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m8_tumu( @@ -1191,7 +1191,7 @@ vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_mu( @@ -1200,7 +1200,7 @@ vint64m8_t 
test_vssra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssra_vv_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_mu( @@ -1218,7 +1218,7 @@ vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssra_vv_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_mu( @@ -1227,7 +1227,7 @@ vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_mu( @@ -1236,7 +1236,7 @@ vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssra_vv_i8mf2_mu(mask, 
maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_mu( @@ -1245,7 +1245,7 @@ vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_mu( @@ -1254,7 +1254,7 @@ vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssra_vv_i8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m1_mu( @@ -1263,7 +1263,7 @@ vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_mu( @@ -1272,7 +1272,7 @@ vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssra_vv_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m2_mu( @@ -1281,7 +1281,7 @@ vint8m2_t test_vssra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_mu( @@ -1290,7 +1290,7 @@ vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssra_vv_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m4_mu( @@ -1299,7 +1299,7 @@ vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_mu( @@ -1308,7 +1308,7 @@ vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssra_vv_i8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i8m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_mu( @@ -1317,7 +1317,7 @@ vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i8m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_mu( @@ 
-1326,7 +1326,7 @@ vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssra_vv_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_mu( @@ -1335,7 +1335,7 @@ vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vssra_vx_i16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_mu( @@ -1344,7 +1344,7 @@ vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssra_vv_i16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_mu( @@ -1353,7 +1353,7 @@ vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vssra_vx_i16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_mu( @@ -1362,7 +1362,7 @@ vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, 
vuint16m1_t shift, size_t vl) { - return vssra_vv_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_mu( @@ -1371,7 +1371,7 @@ vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_mu( @@ -1380,7 +1380,7 @@ vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssra_vv_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_mu( @@ -1389,7 +1389,7 @@ vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_mu( @@ -1398,7 +1398,7 @@ vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssra_vv_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_mu( @@ -1407,7 +1407,7 @@ vint16m4_t 
test_vssra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_mu( @@ -1416,7 +1416,7 @@ vint16m4_t test_vssra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssra_vv_i16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i16m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_mu( @@ -1425,7 +1425,7 @@ vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i16m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_mu( @@ -1434,7 +1434,7 @@ vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssra_vv_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_mu( @@ -1443,7 +1443,7 @@ vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return 
vssra_vx_i32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_mu( @@ -1452,7 +1452,7 @@ vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssra_vv_i32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_mu( @@ -1461,7 +1461,7 @@ vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_mu( @@ -1470,7 +1470,7 @@ vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssra_vv_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_mu( @@ -1479,7 +1479,7 @@ vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_mu( @@ -1488,7 +1488,7 @@ vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t mask, vint32m2_t 
maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssra_vv_i32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m4_mu( @@ -1497,7 +1497,7 @@ vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_mu( @@ -1506,7 +1506,7 @@ vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssra_vv_i32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i32m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_mu( @@ -1515,7 +1515,7 @@ vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i32m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_mu( @@ -1524,7 +1524,7 @@ vint32m8_t test_vssra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssra_vv_i64m1_mu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vssra_vv_i64m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_mu( @@ -1533,7 +1533,7 @@ vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_mu( @@ -1542,7 +1542,7 @@ vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssra_vv_i64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_mu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m4_mu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssra_vv_i64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m4_mu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vssra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m8_mu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vssra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssra_vv_i64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vv_i64m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m8_mu( @@ -1587,6 +1587,6 @@ vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return vssra_vx_i64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssra_vx_i64m8_mu(mask, maskedoff, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssrl.c index 57158ac93752..a7853e577fc3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssrl.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssrl_vv_u8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vssrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_tu( @@ -30,7 +30,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssrl_vv_u8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_tu( @@ -39,7 +39,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_tu( @@ -48,7 +48,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssrl_vv_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_tu( @@ -57,7 +57,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_tu( @@ -66,7 +66,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_ // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssrl_vv_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_tu( @@ -84,7 +84,7 @@ vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssrl_vv_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_tu( @@ -93,7 +93,7 @@ vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_tu( @@ -102,7 +102,7 @@ vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssrl_vv_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_tu( @@ -111,7 +111,7 @@ vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_ // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssrl_vv_u8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_tu( @@ -129,7 +129,7 @@ vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_tu( @@ -138,7 +138,7 @@ vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t sh // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssrl_vv_u16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_tu( @@ -147,7 +147,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_tu( @@ -156,7 +156,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t 
op1, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssrl_vv_u16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_tu( @@ -165,7 +165,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_tu( @@ -174,7 +174,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssrl_vv_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_tu( @@ -183,7 +183,7 @@ vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_tu( @@ -192,7 +192,7 @@ vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssrl_vv_u16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_tu( @@ -201,7 +201,7 @@ vuint16m2_t 
test_vssrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_tu( @@ -210,7 +210,7 @@ vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssrl_vv_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_tu( @@ -219,7 +219,7 @@ vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_tu( @@ -228,7 +228,7 @@ vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssrl_vv_u16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_tu( @@ -237,7 +237,7 @@ vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vssrl_vv_u32mf2_tu( @@ -246,7 +246,7 @@ vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssrl_vv_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tu( @@ -255,7 +255,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32mf2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32mf2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_tu( @@ -264,7 +264,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssrl_vv_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_tu( @@ -273,7 +273,7 @@ vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_tu( @@ -282,7 +282,7 @@ vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssrl_vv_u32m2_tu(maskedoff, op1, shift, vl); + return 
__riscv_vssrl_vv_u32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_tu( @@ -291,7 +291,7 @@ vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_tu( @@ -300,7 +300,7 @@ vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssrl_vv_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_tu( @@ -309,7 +309,7 @@ vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_tu( @@ -318,7 +318,7 @@ vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssrl_vv_u32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_tu( @@ -327,7 +327,7 @@ vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return 
vssrl_vx_u32m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_tu( @@ -336,7 +336,7 @@ vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssrl_vv_u64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_tu( @@ -345,7 +345,7 @@ vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m1_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m1_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_tu( @@ -354,7 +354,7 @@ vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssrl_vv_u64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_tu( @@ -363,7 +363,7 @@ vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m2_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m2_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_tu( @@ -372,7 +372,7 @@ vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, 
vuint64m4_t shift, size_t vl) { - return vssrl_vv_u64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_tu( @@ -381,7 +381,7 @@ vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m4_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m4_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_tu( @@ -390,7 +390,7 @@ vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssrl_vv_u64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m8_tu(maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m8_tu(maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssrl_vv_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_tum( @@ -417,7 +417,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_tum( @@ -426,7 +426,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssrl_vv_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_tum( @@ -435,7 +435,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_tum( @@ -444,7 +444,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssrl_vv_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_tum( @@ -453,7 +453,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf2_tum(mask, maskedoff, op1, 
shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_tum( @@ -462,7 +462,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssrl_vv_u8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_tum( @@ -471,7 +471,7 @@ vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_tum( @@ -480,7 +480,7 @@ vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssrl_vv_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_tum( @@ -489,7 +489,7 @@ vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_tum( @@ -498,7 +498,7 @@ vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, 
vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssrl_vv_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_tum( @@ -507,7 +507,7 @@ vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_tum( @@ -516,7 +516,7 @@ vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssrl_vv_u8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_tum( @@ -525,7 +525,7 @@ vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_tum( @@ -534,7 +534,7 @@ vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssrl_vv_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_tum( @@ -543,7 +543,7 @@ 
vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_tum( @@ -552,7 +552,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssrl_vv_u16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_tum( @@ -561,7 +561,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_tum( @@ -570,7 +570,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssrl_vv_u16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_tum( @@ -579,7 +579,7 @@ vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t 
shift, size_t vl) { - return vssrl_vx_u16m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_tum( @@ -588,7 +588,7 @@ vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssrl_vv_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_tum( @@ -597,7 +597,7 @@ vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_tum( @@ -606,7 +606,7 @@ vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssrl_vv_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_tum( @@ -615,7 +615,7 @@ vuint16m4_t test_vssrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_tum( @@ -624,7 +624,7 @@ vuint16m4_t 
test_vssrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssrl_vv_u16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_tum( @@ -633,7 +633,7 @@ vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tum( @@ -642,7 +642,7 @@ vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssrl_vv_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tum( @@ -651,7 +651,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32mf2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32mf2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_tum( @@ -660,7 +660,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { 
- return vssrl_vv_u32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_tum( @@ -669,7 +669,7 @@ vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_tum( @@ -678,7 +678,7 @@ vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssrl_vv_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_tum( @@ -687,7 +687,7 @@ vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_tum( @@ -696,7 +696,7 @@ vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssrl_vv_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_tum( @@ -705,7 +705,7 @@ vuint32m4_t 
test_vssrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_tum( @@ -714,7 +714,7 @@ vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssrl_vv_u32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_tum( @@ -723,7 +723,7 @@ vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_tum( @@ -732,7 +732,7 @@ vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssrl_vv_u64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_tum( @@ -741,7 +741,7 @@ vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return 
vssrl_vx_u64m1_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m1_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_tum( @@ -750,7 +750,7 @@ vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssrl_vv_u64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_tum( @@ -759,7 +759,7 @@ vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m2_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m2_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_tum( @@ -768,7 +768,7 @@ vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssrl_vv_u64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_tum( @@ -777,7 +777,7 @@ vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m4_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m4_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_tum( @@ -786,7 +786,7 @@ vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t 
mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssrl_vv_u64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m8_tum(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m8_tum(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssrl_vv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_tumu( @@ -813,7 +813,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_tumu( @@ -822,7 +822,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssrl_vv_u8mf4_tumu(mask, 
maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_tumu( @@ -831,7 +831,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_tumu( @@ -840,7 +840,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssrl_vv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_tumu( @@ -849,7 +849,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_tumu( @@ -858,7 +858,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssrl_vv_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_tumu( @@ -867,7 +867,7 @@ vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t 
maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_tumu( @@ -876,7 +876,7 @@ vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssrl_vv_u8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_tumu( @@ -885,7 +885,7 @@ vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_tumu( @@ -894,7 +894,7 @@ vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssrl_vv_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_tumu( @@ -903,7 +903,7 @@ vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m4_tumu(mask, maskedoff, op1, shift, vl); + return 
__riscv_vssrl_vx_u8m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_tumu( @@ -912,7 +912,7 @@ vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssrl_vv_u8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_tumu( @@ -921,7 +921,7 @@ vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_tumu( @@ -930,7 +930,7 @@ vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssrl_vv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_tumu( @@ -939,7 +939,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_tumu( @@ -948,7 +948,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, v // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssrl_vv_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_tumu( @@ -957,7 +957,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_tumu( @@ -966,7 +966,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssrl_vv_u16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_tumu( @@ -975,7 +975,7 @@ vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_tumu( @@ -984,7 +984,7 @@ vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssrl_vv_u16m2_tumu(mask, maskedoff, op1, 
shift, vl); + return __riscv_vssrl_vv_u16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_tumu( @@ -993,7 +993,7 @@ vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_tumu( @@ -1002,7 +1002,7 @@ vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssrl_vv_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_tumu( @@ -1011,7 +1011,7 @@ vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_tumu( @@ -1020,7 +1020,7 @@ vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssrl_vv_u16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_tumu( @@ -1029,7 +1029,7 @@ vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t 
maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_tumu( @@ -1038,7 +1038,7 @@ vuint16m8_t test_vssrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssrl_vv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_tumu( @@ -1047,7 +1047,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32mf2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32mf2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_tumu( @@ -1056,7 +1056,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssrl_vv_u32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_tumu( @@ -1065,7 +1065,7 @@ vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return 
vssrl_vx_u32m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_tumu( @@ -1074,7 +1074,7 @@ vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssrl_vv_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_tumu( @@ -1083,7 +1083,7 @@ vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_tumu( @@ -1092,7 +1092,7 @@ vuint32m2_t test_vssrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssrl_vv_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_tumu( @@ -1101,7 +1101,7 @@ vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_tumu( @@ -1110,7 +1110,7 @@ vuint32m4_t 
test_vssrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssrl_vv_u32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_tumu( @@ -1119,7 +1119,7 @@ vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_tumu( @@ -1128,7 +1128,7 @@ vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssrl_vv_u64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_tumu( @@ -1137,7 +1137,7 @@ vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m1_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m1_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_tumu( @@ -1146,7 +1146,7 @@ vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, 
size_t vl) { - return vssrl_vv_u64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_tumu( @@ -1155,7 +1155,7 @@ vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m2_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m2_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_tumu( @@ -1164,7 +1164,7 @@ vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssrl_vv_u64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_tumu( @@ -1173,7 +1173,7 @@ vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m4_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m4_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_tumu( @@ -1182,7 +1182,7 @@ vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssrl_vv_u64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ 
vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m8_tumu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m8_tumu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssrl_vv_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_mu( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_mu( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssrl_vv_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_mu( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return 
vssrl_vx_u8mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_mu( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssrl_vv_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_mu( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_mu( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssrl_vv_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_mu( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vssrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_mu( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, 
vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssrl_vv_u8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_mu( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_mu( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssrl_vv_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_mu( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_mu( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssrl_vv_u8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u8m8_mu(mask, maskedoff, op1, 
shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_mu( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u8m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u8m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_mu( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssrl_vv_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_mu( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_mu( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssrl_vv_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_mu( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vssrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_mu( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssrl_vv_u16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_mu( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_mu( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssrl_vv_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_mu( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m2_mu(mask, maskedoff, op1, shift, vl); } // 
CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_mu( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssrl_vv_u16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_mu( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vssrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_mu( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssrl_vv_u16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u16m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_mu( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u16m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u16m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_mu( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t 
maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssrl_vv_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_mu( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32mf2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32mf2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_mu( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssrl_vv_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_mu( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_mu( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssrl_vv_u32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_mu( 
@@ -1479,7 +1479,7 @@ vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_mu( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssrl_vv_u32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_mu( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u32m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_mu( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssrl_vv_u32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u32m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_mu( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t 
vl) { - return vssrl_vx_u32m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u32m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_mu( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssrl_vv_u64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_mu( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m1_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m1_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_mu( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssrl_vv_u64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_mu( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m2_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m2_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_mu( @@ -1560,7 +1560,7 @@ vuint64m2_t 
test_vssrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssrl_vv_u64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_mu( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vssrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m4_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m4_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_mu( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vssrl_vv_u64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vv_u64m8_mu(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vssrl_vx_u64m8_mu(mask, maskedoff, op1, shift, vl); + return __riscv_vssrl_vx_u64m8_mu(mask, maskedoff, op1, shift, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssub.c index 50041413f2a7..b3dc364c6a1d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssub.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssub.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vssub_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vssub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vssub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vssub_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vssub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vssub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vssub_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_tu( @@ -57,7 
+57,7 @@ vint8mf2_t test_vssub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vssub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vssub_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vssub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vssub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vssub_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vssub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vssub_vx_i8m2_tu(vint8m2_t 
maskedoff, vint8m2_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vssub_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vssub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vssub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vssub_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vssub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vssub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vssub_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vssub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t 
op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vssub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vssub_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vssub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vssub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vssub_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vssub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vssub_vx_i16m1_tu(vint16m1_t maskedoff, 
vint16m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vssub_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vssub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vssub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vssub_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vssub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vssub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vssub_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vssub_vv_i16m8_tu(vint16m8_t maskedoff, 
vint16m8_t op1, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vssub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vssub_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vssub_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vssub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vssub_vx_i32m1_tu(vint32m1_t 
maskedoff, vint32m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vssub_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vssub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vssub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vssub_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vssub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vssub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vssub_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vssub_vv_i32m8_tu(vint32m8_t 
maskedoff, vint32m8_t op1, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vssub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vssub_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vssub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vssub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vssub_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vssub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vssub_vx_i64m2_tu(vint64m2_t maskedoff, 
vint64m2_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vssub_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vssub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vssub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vssub_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vssub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_tum( @@ -408,7 +408,7 @@ vint64m8_t test_vssub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vssub_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_tum( @@ -417,7 +417,7 @@ vint8mf8_t 
test_vssub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_tum( @@ -426,7 +426,7 @@ vint8mf8_t test_vssub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vssub_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_tum( @@ -435,7 +435,7 @@ vint8mf4_t test_vssub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_tum( @@ -444,7 +444,7 @@ vint8mf4_t test_vssub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vssub_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_tum( @@ -453,7 +453,7 @@ vint8mf2_t test_vssub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); 
+ return __riscv_vssub_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m1_tum( @@ -462,7 +462,7 @@ vint8mf2_t test_vssub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vssub_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m1_tum( @@ -471,7 +471,7 @@ vint8m1_t test_vssub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m2_tum( @@ -480,7 +480,7 @@ vint8m1_t test_vssub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vssub_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m2_tum( @@ -489,7 +489,7 @@ vint8m2_t test_vssub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m4_tum( @@ -498,7 +498,7 @@ vint8m2_t test_vssub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_tum(vbool2_t mask, vint8m4_t 
maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vssub_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m4_tum( @@ -507,7 +507,7 @@ vint8m4_t test_vssub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m8_tum( @@ -516,7 +516,7 @@ vint8m4_t test_vssub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vssub_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m8_tum( @@ -525,7 +525,7 @@ vint8m8_t test_vssub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_tum( @@ -534,7 +534,7 @@ vint8m8_t test_vssub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vssub_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_tum( @@ -543,7 +543,7 @@ vint16mf4_t test_vssub_vv_i16mf4_tum(vbool64_t 
mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_tum( @@ -552,7 +552,7 @@ vint16mf4_t test_vssub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vssub_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_tum( @@ -561,7 +561,7 @@ vint16mf2_t test_vssub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m1_tum( @@ -570,7 +570,7 @@ vint16mf2_t test_vssub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vssub_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m1_tum( @@ -579,7 +579,7 @@ vint16m1_t test_vssub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vssub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m2_tum( @@ -588,7 +588,7 @@ vint16m1_t test_vssub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vssub_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m2_tum( @@ -597,7 +597,7 @@ vint16m2_t test_vssub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m4_tum( @@ -606,7 +606,7 @@ vint16m2_t test_vssub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vssub_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m4_tum( @@ -615,7 +615,7 @@ vint16m4_t test_vssub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m8_tum( @@ -624,7 +624,7 @@ vint16m4_t test_vssub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vssub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vssub_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m8_tum( @@ -633,7 +633,7 @@ vint16m8_t test_vssub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tum( @@ -642,7 +642,7 @@ vint16m8_t test_vssub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vssub_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tum( @@ -651,7 +651,7 @@ vint32mf2_t test_vssub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m1_tum( @@ -660,7 +660,7 @@ vint32mf2_t test_vssub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vssub_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vssub_vx_i32m1_tum( @@ -669,7 +669,7 @@ vint32m1_t test_vssub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m2_tum( @@ -678,7 +678,7 @@ vint32m1_t test_vssub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vssub_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m2_tum( @@ -687,7 +687,7 @@ vint32m2_t test_vssub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m4_tum( @@ -696,7 +696,7 @@ vint32m2_t test_vssub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vssub_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m4_tum( @@ -705,7 +705,7 @@ vint32m4_t test_vssub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - 
return vssub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m8_tum( @@ -714,7 +714,7 @@ vint32m4_t test_vssub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vssub_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m8_tum( @@ -723,7 +723,7 @@ vint32m8_t test_vssub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m1_tum( @@ -732,7 +732,7 @@ vint32m8_t test_vssub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vssub_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m1_tum( @@ -741,7 +741,7 @@ vint64m1_t test_vssub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m2_tum( @@ -750,7 +750,7 @@ vint64m1_t test_vssub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vssub_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m2_tum( @@ -759,7 +759,7 @@ vint64m2_t test_vssub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m4_tum( @@ -768,7 +768,7 @@ vint64m2_t test_vssub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vssub_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m4_tum( @@ -777,7 +777,7 @@ vint64m4_t test_vssub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m8_tum( @@ -786,7 +786,7 @@ vint64m4_t test_vssub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vssub_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vssub_vx_i64m8_tum( @@ -795,7 +795,7 @@ vint64m8_t test_vssub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_tumu( @@ -804,7 +804,7 @@ vint64m8_t test_vssub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vssub_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_tumu( @@ -813,7 +813,7 @@ vint8mf8_t test_vssub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_tumu( @@ -822,7 +822,7 @@ vint8mf8_t test_vssub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vssub_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_tumu( @@ -831,7 +831,7 @@ vint8mf4_t test_vssub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, 
vint8mf4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_tumu( @@ -840,7 +840,7 @@ vint8mf4_t test_vssub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vssub_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_tumu( @@ -849,7 +849,7 @@ vint8mf2_t test_vssub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m1_tumu( @@ -858,7 +858,7 @@ vint8mf2_t test_vssub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vssub_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m1_tumu( @@ -867,7 +867,7 @@ vint8m1_t test_vssub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m2_tumu( @@ -876,7 +876,7 @@ vint8m1_t 
test_vssub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vssub_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m2_tumu( @@ -885,7 +885,7 @@ vint8m2_t test_vssub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m4_tumu( @@ -894,7 +894,7 @@ vint8m2_t test_vssub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vssub_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m4_tumu( @@ -903,7 +903,7 @@ vint8m4_t test_vssub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m8_tumu( @@ -912,7 +912,7 @@ vint8m4_t test_vssub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vssub_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vssub_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m8_tumu( @@ -921,7 +921,7 @@ vint8m8_t test_vssub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_tumu( @@ -930,7 +930,7 @@ vint8m8_t test_vssub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vssub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_tumu( @@ -939,7 +939,7 @@ vint16mf4_t test_vssub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_tumu( @@ -948,7 +948,7 @@ vint16mf4_t test_vssub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vssub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_tumu( @@ -957,7 +957,7 @@ vint16mf2_t test_vssub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf2_t test_vssub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m1_tumu( @@ -966,7 +966,7 @@ vint16mf2_t test_vssub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vssub_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m1_tumu( @@ -975,7 +975,7 @@ vint16m1_t test_vssub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m2_tumu( @@ -984,7 +984,7 @@ vint16m1_t test_vssub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vssub_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m2_tumu( @@ -993,7 +993,7 @@ vint16m2_t test_vssub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vssub_vv_i16m4_tumu( @@ -1002,7 +1002,7 @@ vint16m2_t test_vssub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vssub_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m4_tumu( @@ -1011,7 +1011,7 @@ vint16m4_t test_vssub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m8_tumu( @@ -1020,7 +1020,7 @@ vint16m4_t test_vssub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vssub_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m8_tumu( @@ -1029,7 +1029,7 @@ vint16m8_t test_vssub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_tumu( @@ -1038,7 +1038,7 @@ vint16m8_t test_vssub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t 
maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vssub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_tumu( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m1_tumu( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vssub_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m1_tumu( @@ -1065,7 +1065,7 @@ vint32m1_t test_vssub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m2_tumu( @@ -1074,7 +1074,7 @@ vint32m1_t test_vssub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vssub_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m2_tumu( @@ 
-1083,7 +1083,7 @@ vint32m2_t test_vssub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m4_tumu( @@ -1092,7 +1092,7 @@ vint32m2_t test_vssub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vssub_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m4_tumu( @@ -1101,7 +1101,7 @@ vint32m4_t test_vssub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m8_tumu( @@ -1110,7 +1110,7 @@ vint32m4_t test_vssub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vssub_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m8_tumu( @@ -1119,7 +1119,7 @@ vint32m8_t test_vssub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - 
return vssub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m1_tumu( @@ -1128,7 +1128,7 @@ vint32m8_t test_vssub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vssub_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m1_tumu( @@ -1137,7 +1137,7 @@ vint64m1_t test_vssub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m2_tumu( @@ -1146,7 +1146,7 @@ vint64m1_t test_vssub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vssub_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m2_tumu( @@ -1155,7 +1155,7 @@ vint64m2_t test_vssub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m4_tumu( @@ -1164,7 +1164,7 @@ vint64m2_t test_vssub_vx_i64m2_tumu(vbool32_t mask, 
vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vssub_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m4_tumu( @@ -1173,7 +1173,7 @@ vint64m4_t test_vssub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m8_tumu( @@ -1182,7 +1182,7 @@ vint64m4_t test_vssub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vssub_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m8_tumu( @@ -1191,7 +1191,7 @@ vint64m8_t test_vssub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_mu( @@ -1200,7 +1200,7 @@ vint64m8_t test_vssub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vssub_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vssub_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_mu( @@ -1209,7 +1209,7 @@ vint8mf8_t test_vssub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_mu( @@ -1218,7 +1218,7 @@ vint8mf8_t test_vssub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vssub_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_mu( @@ -1227,7 +1227,7 @@ vint8mf4_t test_vssub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_mu( @@ -1236,7 +1236,7 @@ vint8mf4_t test_vssub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vssub_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_mu( @@ -1245,7 +1245,7 @@ vint8mf2_t test_vssub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vssub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m1_mu( @@ -1254,7 +1254,7 @@ vint8mf2_t test_vssub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vssub_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m1_mu( @@ -1263,7 +1263,7 @@ vint8m1_t test_vssub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m2_mu( @@ -1272,7 +1272,7 @@ vint8m1_t test_vssub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vssub_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m2_mu( @@ -1281,7 +1281,7 @@ vint8m2_t test_vssub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m4_mu( @@ -1290,7 +1290,7 @@ vint8m2_t 
test_vssub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vssub_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m4_mu( @@ -1299,7 +1299,7 @@ vint8m4_t test_vssub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m8_mu( @@ -1308,7 +1308,7 @@ vint8m4_t test_vssub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vssub_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m8_mu( @@ -1317,7 +1317,7 @@ vint8m8_t test_vssub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vssub_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_mu( @@ -1326,7 +1326,7 @@ vint8m8_t test_vssub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vssub_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vssub_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_mu( @@ -1335,7 +1335,7 @@ vint16mf4_t test_vssub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_mu( @@ -1344,7 +1344,7 @@ vint16mf4_t test_vssub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vssub_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_mu( @@ -1353,7 +1353,7 @@ vint16mf2_t test_vssub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m1_mu( @@ -1362,7 +1362,7 @@ vint16mf2_t test_vssub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vssub_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m1_mu( @@ -1371,7 +1371,7 @@ vint16m1_t test_vssub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vssub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m2_mu( @@ -1380,7 +1380,7 @@ vint16m1_t test_vssub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vssub_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m2_mu( @@ -1389,7 +1389,7 @@ vint16m2_t test_vssub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m4_mu( @@ -1398,7 +1398,7 @@ vint16m2_t test_vssub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vssub_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m4_mu( @@ -1407,7 +1407,7 @@ vint16m4_t test_vssub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m8_mu( @@ -1416,7 
+1416,7 @@ vint16m4_t test_vssub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vssub_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m8_mu( @@ -1425,7 +1425,7 @@ vint16m8_t test_vssub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vssub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_mu( @@ -1434,7 +1434,7 @@ vint16m8_t test_vssub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vssub_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_mu( @@ -1443,7 +1443,7 @@ vint32mf2_t test_vssub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m1_mu( @@ -1452,7 +1452,7 @@ vint32mf2_t test_vssub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return 
vssub_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m1_mu( @@ -1461,7 +1461,7 @@ vint32m1_t test_vssub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m2_mu( @@ -1470,7 +1470,7 @@ vint32m1_t test_vssub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vssub_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m2_mu( @@ -1479,7 +1479,7 @@ vint32m2_t test_vssub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m4_mu( @@ -1488,7 +1488,7 @@ vint32m2_t test_vssub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vssub_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m4_mu( @@ -1497,7 +1497,7 @@ vint32m4_t test_vssub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i32m8_mu( @@ -1506,7 +1506,7 @@ vint32m4_t test_vssub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vssub_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i32m8_mu( @@ -1515,7 +1515,7 @@ vint32m8_t test_vssub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vssub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m1_mu( @@ -1524,7 +1524,7 @@ vint32m8_t test_vssub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vssub_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m1_mu( @@ -1533,7 +1533,7 @@ vint64m1_t test_vssub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vssub_vv_i64m2_mu( @@ -1542,7 +1542,7 @@ vint64m1_t test_vssub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vssub_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m2_mu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vssub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m4_mu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vssub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vssub_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m4_mu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vssub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i64m8_mu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vssub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, 
size_t vl) { - return vssub_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i64m8_mu( @@ -1587,6 +1587,6 @@ vint64m8_t test_vssub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vssub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssubu.c index eb70d54a1aec..f9a390c33b89 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vssubu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vssubu_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_tu( @@ -21,7 +21,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_tu( @@ -30,7 +30,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vssubu_vv_u8mf4_tu(maskedoff, op1, op2, vl); + 
return __riscv_vssubu_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_tu( @@ -39,7 +39,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_tu( @@ -48,7 +48,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vssubu_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_tu( @@ -57,7 +57,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_tu( @@ -66,7 +66,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vssubu_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_tu( @@ -75,7 +75,7 @@ vuint8m1_t test_vssubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m1_tu(maskedoff, op1, op2, 
vl); + return __riscv_vssubu_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_tu( @@ -84,7 +84,7 @@ vuint8m1_t test_vssubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vssubu_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_tu( @@ -93,7 +93,7 @@ vuint8m2_t test_vssubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_tu( @@ -102,7 +102,7 @@ vuint8m2_t test_vssubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vssubu_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_tu( @@ -111,7 +111,7 @@ vuint8m4_t test_vssubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_tu( @@ -120,7 +120,7 @@ vuint8m4_t test_vssubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vssubu_vv_u8m8_tu(maskedoff, op1, op2, vl); + 
return __riscv_vssubu_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_tu( @@ -129,7 +129,7 @@ vuint8m8_t test_vssubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_tu( @@ -138,7 +138,7 @@ vuint8m8_t test_vssubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vssubu_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_tu( @@ -147,7 +147,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_tu( @@ -156,7 +156,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vssubu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_tu( @@ -165,7 +165,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return 
vssubu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_tu( @@ -174,7 +174,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vssubu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_tu( @@ -183,7 +183,7 @@ vuint16m1_t test_vssubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_tu( @@ -192,7 +192,7 @@ vuint16m1_t test_vssubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vssubu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_tu( @@ -201,7 +201,7 @@ vuint16m2_t test_vssubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_tu( @@ -210,7 +210,7 @@ vuint16m2_t test_vssubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, 
vuint16m4_t op2, size_t vl) { - return vssubu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_tu( @@ -219,7 +219,7 @@ vuint16m4_t test_vssubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_tu( @@ -228,7 +228,7 @@ vuint16m4_t test_vssubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vssubu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_tu( @@ -237,7 +237,7 @@ vuint16m8_t test_vssubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tu( @@ -246,7 +246,7 @@ vuint16m8_t test_vssubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vssubu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tu( @@ -255,7 +255,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vssubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_tu( @@ -264,7 +264,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vssubu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_tu( @@ -273,7 +273,7 @@ vuint32m1_t test_vssubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_tu( @@ -282,7 +282,7 @@ vuint32m1_t test_vssubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vssubu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_tu( @@ -291,7 +291,7 @@ vuint32m2_t test_vssubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_tu( @@ -300,7 +300,7 @@ vuint32m2_t test_vssubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vssubu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_tu( @@ -309,7 +309,7 @@ vuint32m4_t test_vssubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_tu( @@ -318,7 +318,7 @@ vuint32m4_t test_vssubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vssubu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_tu( @@ -327,7 +327,7 @@ vuint32m8_t test_vssubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_tu( @@ -336,7 +336,7 @@ vuint32m8_t test_vssubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vssubu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_tu( @@ -345,7 +345,7 @@ vuint64m1_t test_vssubu_vv_u64m1_tu(vuint64m1_t 
maskedoff, vuint64m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_tu( @@ -354,7 +354,7 @@ vuint64m1_t test_vssubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vssubu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_tu( @@ -363,7 +363,7 @@ vuint64m2_t test_vssubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_tu( @@ -372,7 +372,7 @@ vuint64m2_t test_vssubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vssubu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_tu( @@ -381,7 +381,7 @@ vuint64m4_t test_vssubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_tu( @@ -390,7 +390,7 @@ vuint64m4_t 
test_vssubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vssubu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_tu( @@ -399,7 +399,7 @@ vuint64m8_t test_vssubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_tum( @@ -408,7 +408,7 @@ vuint64m8_t test_vssubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vssubu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_tum( @@ -417,7 +417,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_tum( @@ -426,7 +426,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vssubu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vssubu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_tum( @@ -435,7 +435,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_tum( @@ -444,7 +444,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vssubu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_tum( @@ -453,7 +453,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_tum( @@ -462,7 +462,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vssubu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_tum( @@ -471,7 +471,7 @@ vuint8m1_t test_vssubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vssubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_tum( @@ -480,7 +480,7 @@ vuint8m1_t test_vssubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vssubu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_tum( @@ -489,7 +489,7 @@ vuint8m2_t test_vssubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_tum( @@ -498,7 +498,7 @@ vuint8m2_t test_vssubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vssubu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_tum( @@ -507,7 +507,7 @@ vuint8m4_t test_vssubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_tum( 
@@ -516,7 +516,7 @@ vuint8m4_t test_vssubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vssubu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_tum( @@ -525,7 +525,7 @@ vuint8m8_t test_vssubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_tum( @@ -534,7 +534,7 @@ vuint8m8_t test_vssubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vssubu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_tum( @@ -543,7 +543,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_tum( @@ -552,7 +552,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t 
op2, size_t vl) { - return vssubu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_tum( @@ -561,7 +561,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_tum( @@ -570,7 +570,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vssubu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_tum( @@ -579,7 +579,7 @@ vuint16m1_t test_vssubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_tum( @@ -588,7 +588,7 @@ vuint16m1_t test_vssubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vssubu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_tum( @@ -597,7 +597,7 @@ vuint16m2_t 
test_vssubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_tum( @@ -606,7 +606,7 @@ vuint16m2_t test_vssubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vssubu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_tum( @@ -615,7 +615,7 @@ vuint16m4_t test_vssubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_tum( @@ -624,7 +624,7 @@ vuint16m4_t test_vssubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vssubu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_tum( @@ -633,7 +633,7 @@ vuint16m8_t test_vssubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return 
vssubu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tum( @@ -642,7 +642,7 @@ vuint16m8_t test_vssubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vssubu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tum( @@ -651,7 +651,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_tum( @@ -660,7 +660,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vssubu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_tum( @@ -669,7 +669,7 @@ vuint32m1_t test_vssubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_tum( @@ -678,7 +678,7 @@ vuint32m1_t 
test_vssubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vssubu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_tum( @@ -687,7 +687,7 @@ vuint32m2_t test_vssubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_tum( @@ -696,7 +696,7 @@ vuint32m2_t test_vssubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vssubu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_tum( @@ -705,7 +705,7 @@ vuint32m4_t test_vssubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_tum( @@ -714,7 +714,7 @@ vuint32m4_t test_vssubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return 
vssubu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_tum( @@ -723,7 +723,7 @@ vuint32m8_t test_vssubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_tum( @@ -732,7 +732,7 @@ vuint32m8_t test_vssubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vssubu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_tum( @@ -741,7 +741,7 @@ vuint64m1_t test_vssubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_tum( @@ -750,7 +750,7 @@ vuint64m1_t test_vssubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vssubu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_tum( @@ -759,7 +759,7 @@ vuint64m2_t test_vssubu_vv_u64m2_tum(vbool32_t mask, 
vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_tum( @@ -768,7 +768,7 @@ vuint64m2_t test_vssubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vssubu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_tum( @@ -777,7 +777,7 @@ vuint64m4_t test_vssubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_tum( @@ -786,7 +786,7 @@ vuint64m4_t test_vssubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vssubu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_tum( @@ -795,7 +795,7 @@ vuint64m8_t test_vssubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + 
return __riscv_vssubu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_tumu( @@ -804,7 +804,7 @@ vuint64m8_t test_vssubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vssubu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_tumu( @@ -813,7 +813,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_tumu( @@ -822,7 +822,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vssubu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_tumu( @@ -831,7 +831,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_tumu( @@ -840,7 +840,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vui // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vssubu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_tumu( @@ -849,7 +849,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_tumu( @@ -858,7 +858,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vssubu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_tumu( @@ -867,7 +867,7 @@ vuint8m1_t test_vssubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_tumu( @@ -876,7 +876,7 @@ vuint8m1_t test_vssubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vssubu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vssubu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_tumu( @@ -885,7 +885,7 @@ vuint8m2_t test_vssubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_tumu( @@ -894,7 +894,7 @@ vuint8m2_t test_vssubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vssubu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_tumu( @@ -903,7 +903,7 @@ vuint8m4_t test_vssubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_tumu( @@ -912,7 +912,7 @@ vuint8m4_t test_vssubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vssubu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_tumu( @@ -921,7 +921,7 @@ vuint8m8_t test_vssubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t 
test_vssubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_tumu( @@ -930,7 +930,7 @@ vuint8m8_t test_vssubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vssubu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_tumu( @@ -939,7 +939,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_tumu( @@ -948,7 +948,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vssubu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_tumu( @@ -957,7 +957,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vssubu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_tumu( @@ -966,7 +966,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vssubu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_tumu( @@ -975,7 +975,7 @@ vuint16m1_t test_vssubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_tumu( @@ -984,7 +984,7 @@ vuint16m1_t test_vssubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vssubu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_tumu( @@ -993,7 +993,7 @@ vuint16m2_t test_vssubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_tumu( @@ -1002,7 +1002,7 @@ vuint16m2_t test_vssubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vssubu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_tumu( @@ -1011,7 +1011,7 @@ vuint16m4_t test_vssubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_tumu( @@ -1020,7 +1020,7 @@ vuint16m4_t test_vssubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vssubu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_tumu( @@ -1029,7 +1029,7 @@ vuint16m8_t test_vssubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_tumu( @@ -1038,7 +1038,7 @@ vuint16m8_t test_vssubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vssubu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); 
+ return __riscv_vssubu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_tumu( @@ -1047,7 +1047,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_tumu( @@ -1056,7 +1056,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vssubu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_tumu( @@ -1065,7 +1065,7 @@ vuint32m1_t test_vssubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_tumu( @@ -1074,7 +1074,7 @@ vuint32m1_t test_vssubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vssubu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_tumu( @@ -1083,7 +1083,7 @@ vuint32m2_t test_vssubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t 
maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_tumu( @@ -1092,7 +1092,7 @@ vuint32m2_t test_vssubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vssubu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_tumu( @@ -1101,7 +1101,7 @@ vuint32m4_t test_vssubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_tumu( @@ -1110,7 +1110,7 @@ vuint32m4_t test_vssubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vssubu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_tumu( @@ -1119,7 +1119,7 @@ vuint32m8_t test_vssubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m8_tumu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vssubu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_tumu( @@ -1128,7 +1128,7 @@ vuint32m8_t test_vssubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vssubu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_tumu( @@ -1137,7 +1137,7 @@ vuint64m1_t test_vssubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_tumu( @@ -1146,7 +1146,7 @@ vuint64m1_t test_vssubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vssubu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_tumu( @@ -1155,7 +1155,7 @@ vuint64m2_t test_vssubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_tumu( @@ -1164,7 +1164,7 @@ vuint64m2_t test_vssubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t 
maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vssubu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_tumu( @@ -1173,7 +1173,7 @@ vuint64m4_t test_vssubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_tumu( @@ -1182,7 +1182,7 @@ vuint64m4_t test_vssubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vssubu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_tumu( @@ -1191,7 +1191,7 @@ vuint64m8_t test_vssubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_mu( @@ -1200,7 +1200,7 @@ vuint64m8_t test_vssubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vssubu_vv_u8mf8_mu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vssubu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_mu( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_mu( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vssubu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_mu( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_mu( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vssubu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_mu( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_mu( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vssubu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_mu( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vssubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_mu( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vssubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vssubu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_mu( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vssubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vssubu_vv_u8m4_mu( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vssubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vssubu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_mu( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vssubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_mu( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vssubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vssubu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_mu( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vssubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vssubu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_mu( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vssubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t 
vl) { - return vssubu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_mu( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_mu( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vssubu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_mu( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_mu( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vssubu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_mu( @@ -1371,7 +1371,7 @@ vuint16m1_t 
test_vssubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_mu( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vssubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vssubu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_mu( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vssubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_mu( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vssubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vssubu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_mu( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vssubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m4_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_mu( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vssubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vssubu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_mu( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vssubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vssubu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_mu( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vssubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vssubu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_mu( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_mu( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t 
maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vssubu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_mu( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vssubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_mu( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vssubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vssubu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_mu( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vssubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_mu( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vssubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vssubu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vssubu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_mu( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vssubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_mu( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vssubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vssubu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_mu( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vssubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vssubu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_mu( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vssubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vssubu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_mu( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vssubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m1_t test_vssubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_mu( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vssubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vssubu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_mu( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vssubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_mu( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vssubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vssubu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_mu( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vssubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_mu( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vssubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vssubu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_mu( @@ -1587,6 +1587,6 @@ vuint64m8_t test_vssubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vssubu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vssubu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsub.c index 45cc4864544e..063eb6c50067 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsub.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsub_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vsub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t 
test_vsub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsub_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vsub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vsub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsub_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t test_vsub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vsub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsub_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vsub_vv_i8m1_tu(vint8m1_t maskedoff, 
vint8m1_t op1, vint8m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vsub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsub_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vsub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vsub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsub_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vsub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vsub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m8_t test_vsub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsub_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vsub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vsub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsub_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vsub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsub_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vsub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsub_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vsub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vsub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsub_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vsub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vsub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vsub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsub_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vsub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vsub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsub_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vsub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vsub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsub_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vsub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsub_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vsub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vsub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsub_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vsub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vsub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vsub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsub_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vsub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vsub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsub_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vsub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vsub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsub_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vsub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vsub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vsub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsub_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vsub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vsub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsub_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vsub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vsub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vsub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsub_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vsub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_tu( @@ -408,7 +408,7 @@ vint64m8_t test_vsub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsub_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_tu( @@ -417,7 +417,7 @@ vuint8mf8_t test_vsub_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_tu( @@ -426,7 +426,7 @@ vuint8mf8_t test_vsub_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsub_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_tu( @@ -435,7 +435,7 @@ vuint8mf4_t test_vsub_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vsub_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_tu( @@ -444,7 +444,7 @@ vuint8mf4_t test_vsub_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsub_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_tu( @@ -453,7 +453,7 @@ vuint8mf2_t test_vsub_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m1_tu( @@ -462,7 +462,7 @@ vuint8mf2_t test_vsub_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsub_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m1_tu( @@ -471,7 +471,7 @@ vuint8m1_t test_vsub_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m2_tu( @@ -480,7 +480,7 @@ vuint8m1_t test_vsub_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vsub_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsub_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m2_tu( @@ -489,7 +489,7 @@ vuint8m2_t test_vsub_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m4_tu( @@ -498,7 +498,7 @@ vuint8m2_t test_vsub_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsub_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m4_tu( @@ -507,7 +507,7 @@ vuint8m4_t test_vsub_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m8_tu( @@ -516,7 +516,7 @@ vuint8m4_t test_vsub_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsub_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m8_tu( @@ -525,7 +525,7 @@ vuint8m8_t test_vsub_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8_tu(vuint8m8_t maskedoff, 
vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_tu( @@ -534,7 +534,7 @@ vuint8m8_t test_vsub_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsub_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_tu( @@ -543,7 +543,7 @@ vuint16mf4_t test_vsub_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_tu( @@ -552,7 +552,7 @@ vuint16mf4_t test_vsub_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsub_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_tu( @@ -561,7 +561,7 @@ vuint16mf2_t test_vsub_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m1_tu( @@ -570,7 +570,7 @@ vuint16mf2_t test_vsub_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vsub_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsub_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m1_tu( @@ -579,7 +579,7 @@ vuint16m1_t test_vsub_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m2_tu( @@ -588,7 +588,7 @@ vuint16m1_t test_vsub_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsub_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m2_tu( @@ -597,7 +597,7 @@ vuint16m2_t test_vsub_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m4_tu( @@ -606,7 +606,7 @@ vuint16m2_t test_vsub_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsub_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m4_tu( @@ -615,7 +615,7 @@ vuint16m4_t test_vsub_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vsub_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m8_tu( @@ -624,7 +624,7 @@ vuint16m4_t test_vsub_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsub_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m8_tu( @@ -633,7 +633,7 @@ vuint16m8_t test_vsub_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tu( @@ -642,7 +642,7 @@ vuint16m8_t test_vsub_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsub_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tu( @@ -651,7 +651,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m1_tu( @@ -660,7 +660,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m1_t test_vsub_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsub_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m1_tu( @@ -669,7 +669,7 @@ vuint32m1_t test_vsub_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m2_tu( @@ -678,7 +678,7 @@ vuint32m1_t test_vsub_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsub_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m2_tu( @@ -687,7 +687,7 @@ vuint32m2_t test_vsub_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m4_tu( @@ -696,7 +696,7 @@ vuint32m2_t test_vsub_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsub_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m4_tu( @@ -705,7 +705,7 @@ vuint32m4_t test_vsub_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m4_t test_vsub_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m8_tu( @@ -714,7 +714,7 @@ vuint32m4_t test_vsub_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsub_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m8_tu( @@ -723,7 +723,7 @@ vuint32m8_t test_vsub_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m1_tu( @@ -732,7 +732,7 @@ vuint32m8_t test_vsub_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsub_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m1_tu( @@ -741,7 +741,7 @@ vuint64m1_t test_vsub_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m2_tu( @@ -750,7 +750,7 @@ vuint64m1_t test_vsub_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m2_t test_vsub_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsub_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m2_tu( @@ -759,7 +759,7 @@ vuint64m2_t test_vsub_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m4_tu( @@ -768,7 +768,7 @@ vuint64m2_t test_vsub_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsub_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m4_tu( @@ -777,7 +777,7 @@ vuint64m4_t test_vsub_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m8_tu( @@ -786,7 +786,7 @@ vuint64m4_t test_vsub_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsub_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m8_tu( @@ -795,7 +795,7 @@ vuint64m8_t test_vsub_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m8_t test_vsub_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_tum( @@ -804,7 +804,7 @@ vuint64m8_t test_vsub_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsub_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_tum( @@ -813,7 +813,7 @@ vint8mf8_t test_vsub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_tum( @@ -822,7 +822,7 @@ vint8mf8_t test_vsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsub_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_tum( @@ -831,7 +831,7 @@ vint8mf4_t test_vsub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_tum( @@ -840,7 +840,7 @@ vint8mf4_t 
test_vsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsub_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_tum( @@ -849,7 +849,7 @@ vint8mf2_t test_vsub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m1_tum( @@ -858,7 +858,7 @@ vint8mf2_t test_vsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsub_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m1_tum( @@ -867,7 +867,7 @@ vint8m1_t test_vsub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m2_tum( @@ -876,7 +876,7 @@ vint8m1_t test_vsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsub_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsub_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m2_tum( @@ -885,7 +885,7 @@ vint8m2_t test_vsub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m4_tum( @@ -894,7 +894,7 @@ vint8m2_t test_vsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsub_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m4_tum( @@ -903,7 +903,7 @@ vint8m4_t test_vsub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m8_tum( @@ -912,7 +912,7 @@ vint8m4_t test_vsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsub_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m8_tum( @@ -921,7 +921,7 @@ vint8m8_t test_vsub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, 
int8_t op2, size_t vl) { - return vsub_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_tum( @@ -930,7 +930,7 @@ vint8m8_t test_vsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsub_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_tum( @@ -939,7 +939,7 @@ vint16mf4_t test_vsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_tum( @@ -948,7 +948,7 @@ vint16mf4_t test_vsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsub_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_tum( @@ -957,7 +957,7 @@ vint16mf2_t test_vsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m1_tum( @@ -966,7 +966,7 @@ vint16mf2_t test_vsub_vx_i16mf2_tum(vbool32_t 
mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsub_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m1_tum( @@ -975,7 +975,7 @@ vint16m1_t test_vsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m2_tum( @@ -984,7 +984,7 @@ vint16m1_t test_vsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsub_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m2_tum( @@ -993,7 +993,7 @@ vint16m2_t test_vsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m4_tum( @@ -1002,7 +1002,7 @@ vint16m2_t test_vsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsub_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m4_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m4_tum( @@ -1011,7 +1011,7 @@ vint16m4_t test_vsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m8_tum( @@ -1020,7 +1020,7 @@ vint16m4_t test_vsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsub_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m8_tum( @@ -1029,7 +1029,7 @@ vint16m8_t test_vsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tum( @@ -1038,7 +1038,7 @@ vint16m8_t test_vsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsub_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tum( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t 
maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m1_tum( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsub_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m1_tum( @@ -1065,7 +1065,7 @@ vint32m1_t test_vsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m2_tum( @@ -1074,7 +1074,7 @@ vint32m1_t test_vsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsub_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m2_tum( @@ -1083,7 +1083,7 @@ vint32m2_t test_vsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m4_tum( @@ -1092,7 +1092,7 @@ vint32m2_t 
test_vsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsub_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m4_tum( @@ -1101,7 +1101,7 @@ vint32m4_t test_vsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m8_tum( @@ -1110,7 +1110,7 @@ vint32m4_t test_vsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsub_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m8_tum( @@ -1119,7 +1119,7 @@ vint32m8_t test_vsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m1_tum( @@ -1128,7 +1128,7 @@ vint32m8_t test_vsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsub_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + 
return __riscv_vsub_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m1_tum( @@ -1137,7 +1137,7 @@ vint64m1_t test_vsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m2_tum( @@ -1146,7 +1146,7 @@ vint64m1_t test_vsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsub_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m2_tum( @@ -1155,7 +1155,7 @@ vint64m2_t test_vsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m4_tum( @@ -1164,7 +1164,7 @@ vint64m2_t test_vsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsub_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m4_tum( @@ -1173,7 +1173,7 @@ vint64m4_t test_vsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m8_tum( @@ -1182,7 +1182,7 @@ vint64m4_t test_vsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsub_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m8_tum( @@ -1191,7 +1191,7 @@ vint64m8_t test_vsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_tum( @@ -1200,7 +1200,7 @@ vint64m8_t test_vsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsub_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_tum( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vsub_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_tum( @@ 
-1218,7 +1218,7 @@ vuint8mf8_t test_vsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsub_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_tum( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vsub_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_tum( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsub_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_tum( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vsub_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m1_tum( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return 
vsub_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m1_tum( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vsub_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m2_tum( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsub_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m2_tum( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vsub_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m4_tum( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsub_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m4_tum( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vsub_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m4_t test_vsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m8_tum( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsub_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m8_tum( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vsub_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_tum( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsub_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_tum( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vsub_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsub_vv_u16mf2_tum( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsub_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_tum( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vsub_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m1_tum( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsub_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m1_tum( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vsub_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m2_tum( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, 
vuint16m2_t op2, size_t vl) { - return vsub_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m2_tum( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vsub_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m4_tum( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsub_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m4_tum( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vsub_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m8_tum( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsub_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m8_tum( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vsub_vv_u16m8_tum(vbool2_t 
mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tum( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsub_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tum( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m1_tum( @@ -1452,7 +1452,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsub_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m1_tum( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vsub_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + 
return __riscv_vsub_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m2_tum( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsub_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m2_tum( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vsub_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m4_tum( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsub_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m4_tum( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vsub_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m8_tum( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t 
test_vsub_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsub_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m8_tum( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vsub_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m1_tum( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsub_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m1_tum( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vsub_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m2_tum( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsub_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsub_vx_u64m2_tum( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vsub_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m4_tum( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsub_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m4_tum( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vsub_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m8_tum( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsub_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m8_tum( @@ -1587,7 +1587,7 @@ vuint64m8_t test_vsub_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, 
size_t vl) { - return vsub_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_tumu( @@ -1596,7 +1596,7 @@ vuint64m8_t test_vsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsub_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_tumu( @@ -1605,7 +1605,7 @@ vint8mf8_t test_vsub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_tumu( @@ -1614,7 +1614,7 @@ vint8mf8_t test_vsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsub_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_tumu( @@ -1623,7 +1623,7 @@ vint8mf4_t test_vsub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_tumu( @@ -1632,7 +1632,7 @@ vint8mf4_t test_vsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t 
maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsub_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_tumu( @@ -1641,7 +1641,7 @@ vint8mf2_t test_vsub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m1_tumu( @@ -1650,7 +1650,7 @@ vint8mf2_t test_vsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsub_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m1_tumu( @@ -1659,7 +1659,7 @@ vint8m1_t test_vsub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m2_tumu( @@ -1668,7 +1668,7 @@ vint8m1_t test_vsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsub_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m2_tumu(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m2_tumu( @@ -1677,7 +1677,7 @@ vint8m2_t test_vsub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m4_tumu( @@ -1686,7 +1686,7 @@ vint8m2_t test_vsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsub_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m4_tumu( @@ -1695,7 +1695,7 @@ vint8m4_t test_vsub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m8_tumu( @@ -1704,7 +1704,7 @@ vint8m4_t test_vsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsub_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m8_tumu( @@ -1713,7 +1713,7 @@ vint8m8_t test_vsub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - 
return vsub_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_tumu( @@ -1722,7 +1722,7 @@ vint8m8_t test_vsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_tumu( @@ -1731,7 +1731,7 @@ vint16mf4_t test_vsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_tumu( @@ -1740,7 +1740,7 @@ vint16mf4_t test_vsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_tumu( @@ -1749,7 +1749,7 @@ vint16mf2_t test_vsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m1_tumu( @@ -1758,7 +1758,7 @@ vint16mf2_t 
test_vsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsub_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m1_tumu( @@ -1767,7 +1767,7 @@ vint16m1_t test_vsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m2_tumu( @@ -1776,7 +1776,7 @@ vint16m1_t test_vsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsub_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m2_tumu( @@ -1785,7 +1785,7 @@ vint16m2_t test_vsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m4_tumu( @@ -1794,7 +1794,7 @@ vint16m2_t test_vsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsub_vv_i16m4_tumu(mask, maskedoff, op1, 
op2, vl); + return __riscv_vsub_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m4_tumu( @@ -1803,7 +1803,7 @@ vint16m4_t test_vsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m8_tumu( @@ -1812,7 +1812,7 @@ vint16m4_t test_vsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsub_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m8_tumu( @@ -1821,7 +1821,7 @@ vint16m8_t test_vsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_tumu( @@ -1830,7 +1830,7 @@ vint16m8_t test_vsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_tumu( @@ -1839,7 +1839,7 @@ vint32mf2_t test_vsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint32mf2_t test_vsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m1_tumu( @@ -1848,7 +1848,7 @@ vint32mf2_t test_vsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsub_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m1_tumu( @@ -1857,7 +1857,7 @@ vint32m1_t test_vsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m2_tumu( @@ -1866,7 +1866,7 @@ vint32m1_t test_vsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsub_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m2_tumu( @@ -1875,7 +1875,7 @@ vint32m2_t test_vsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsub_vv_i32m4_tumu( @@ -1884,7 +1884,7 @@ vint32m2_t test_vsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsub_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m4_tumu( @@ -1893,7 +1893,7 @@ vint32m4_t test_vsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m8_tumu( @@ -1902,7 +1902,7 @@ vint32m4_t test_vsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsub_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m8_tumu( @@ -1911,7 +1911,7 @@ vint32m8_t test_vsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m1_tumu( @@ -1920,7 +1920,7 @@ vint32m8_t test_vsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, 
vint64m1_t op2, size_t vl) { - return vsub_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m1_tumu( @@ -1929,7 +1929,7 @@ vint64m1_t test_vsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m2_tumu( @@ -1938,7 +1938,7 @@ vint64m1_t test_vsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsub_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m2_tumu( @@ -1947,7 +1947,7 @@ vint64m2_t test_vsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m4_tumu( @@ -1956,7 +1956,7 @@ vint64m2_t test_vsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsub_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m4_tumu( @@ -1965,7 +1965,7 @@ vint64m4_t 
test_vsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m8_tumu( @@ -1974,7 +1974,7 @@ vint64m4_t test_vsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsub_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m8_tumu( @@ -1983,7 +1983,7 @@ vint64m8_t test_vsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_tumu( @@ -1992,7 +1992,7 @@ vint64m8_t test_vsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsub_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_tumu( @@ -2001,7 +2001,7 @@ vuint8mf8_t test_vsub_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf8_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_tumu( @@ -2010,7 +2010,7 @@ vuint8mf8_t test_vsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsub_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_tumu( @@ -2019,7 +2019,7 @@ vuint8mf4_t test_vsub_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_tumu( @@ -2028,7 +2028,7 @@ vuint8mf4_t test_vsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsub_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_tumu( @@ -2037,7 +2037,7 @@ vuint8mf2_t test_vsub_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m1_tumu( @@ -2046,7 +2046,7 @@ vuint8mf2_t test_vsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vsub_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m1_tumu( @@ -2055,7 +2055,7 @@ vuint8m1_t test_vsub_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m2_tumu( @@ -2064,7 +2064,7 @@ vuint8m1_t test_vsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsub_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m2_tumu( @@ -2073,7 +2073,7 @@ vuint8m2_t test_vsub_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m4_tumu( @@ -2082,7 +2082,7 @@ vuint8m2_t test_vsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsub_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsub_vx_u8m4_tumu( @@ -2091,7 +2091,7 @@ vuint8m4_t test_vsub_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m8_tumu( @@ -2100,7 +2100,7 @@ vuint8m4_t test_vsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsub_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m8_tumu( @@ -2109,7 +2109,7 @@ vuint8m8_t test_vsub_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_tumu( @@ -2118,7 +2118,7 @@ vuint8m8_t test_vsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsub_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_tumu( @@ -2127,7 +2127,7 @@ vuint16mf4_t test_vsub_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, 
vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_tumu( @@ -2136,7 +2136,7 @@ vuint16mf4_t test_vsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsub_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_tumu( @@ -2145,7 +2145,7 @@ vuint16mf2_t test_vsub_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m1_tumu( @@ -2154,7 +2154,7 @@ vuint16mf2_t test_vsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsub_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m1_tumu( @@ -2163,7 +2163,7 @@ vuint16m1_t test_vsub_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m2_tumu( @@ -2172,7 
+2172,7 @@ vuint16m1_t test_vsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsub_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m2_tumu( @@ -2181,7 +2181,7 @@ vuint16m2_t test_vsub_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m4_tumu( @@ -2190,7 +2190,7 @@ vuint16m2_t test_vsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsub_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m4_tumu( @@ -2199,7 +2199,7 @@ vuint16m4_t test_vsub_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m8_tumu( @@ -2208,7 +2208,7 @@ vuint16m4_t test_vsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return 
vsub_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m8_tumu( @@ -2217,7 +2217,7 @@ vuint16m8_t test_vsub_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_tumu( @@ -2226,7 +2226,7 @@ vuint16m8_t test_vsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsub_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_tumu( @@ -2235,7 +2235,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m1_tumu( @@ -2244,7 +2244,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsub_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m1_tumu( @@ -2253,7 +2253,7 @@ vuint32m1_t test_vsub_vv_u32m1_tumu(vbool32_t 
mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m2_tumu( @@ -2262,7 +2262,7 @@ vuint32m1_t test_vsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsub_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m2_tumu( @@ -2271,7 +2271,7 @@ vuint32m2_t test_vsub_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m4_tumu( @@ -2280,7 +2280,7 @@ vuint32m2_t test_vsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsub_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m4_tumu( @@ -2289,7 +2289,7 @@ vuint32m4_t test_vsub_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vsub_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m8_tumu( @@ -2298,7 +2298,7 @@ vuint32m4_t test_vsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vsub_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m8_tumu( @@ -2307,7 +2307,7 @@ vuint32m8_t test_vsub_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m1_tumu( @@ -2316,7 +2316,7 @@ vuint32m8_t test_vsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsub_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m1_tumu( @@ -2325,7 +2325,7 @@ vuint64m1_t test_vsub_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m2_tumu( @@ -2334,7 +2334,7 @@ vuint64m1_t test_vsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m2_t test_vsub_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsub_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m2_tumu( @@ -2343,7 +2343,7 @@ vuint64m2_t test_vsub_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m4_tumu( @@ -2352,7 +2352,7 @@ vuint64m2_t test_vsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsub_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m4_tumu( @@ -2361,7 +2361,7 @@ vuint64m4_t test_vsub_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m8_tumu( @@ -2370,7 +2370,7 @@ vuint64m4_t test_vsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsub_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsub_vx_u64m8_tumu( @@ -2379,7 +2379,7 @@ vuint64m8_t test_vsub_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_mu( @@ -2388,7 +2388,7 @@ vuint64m8_t test_vsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsub_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_mu( @@ -2397,7 +2397,7 @@ vint8mf8_t test_vsub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_mu( @@ -2406,7 +2406,7 @@ vint8mf8_t test_vsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsub_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_mu( @@ -2415,7 +2415,7 @@ vint8mf4_t test_vsub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) 
{ - return vsub_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_mu( @@ -2424,7 +2424,7 @@ vint8mf4_t test_vsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsub_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_mu( @@ -2433,7 +2433,7 @@ vint8mf2_t test_vsub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m1_mu( @@ -2442,7 +2442,7 @@ vint8mf2_t test_vsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsub_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m1_mu( @@ -2451,7 +2451,7 @@ vint8m1_t test_vsub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m2_mu( @@ -2460,7 +2460,7 @@ vint8m1_t test_vsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vsub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsub_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m2_mu( @@ -2469,7 +2469,7 @@ vint8m2_t test_vsub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m4_mu( @@ -2478,7 +2478,7 @@ vint8m2_t test_vsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsub_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m4_mu( @@ -2487,7 +2487,7 @@ vint8m4_t test_vsub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m8_mu( @@ -2496,7 +2496,7 @@ vint8m4_t test_vsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsub_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m8_mu( @@ -2505,7 +2505,7 @@ vint8m8_t test_vsub_vv_i8m8_mu(vbool1_t mask, 
vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsub_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_mu( @@ -2514,7 +2514,7 @@ vint8m8_t test_vsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsub_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_mu( @@ -2523,7 +2523,7 @@ vint16mf4_t test_vsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_mu( @@ -2532,7 +2532,7 @@ vint16mf4_t test_vsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsub_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_mu( @@ -2541,7 +2541,7 @@ vint16mf2_t test_vsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vsub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m1_mu( @@ -2550,7 +2550,7 @@ vint16mf2_t test_vsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsub_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m1_mu( @@ -2559,7 +2559,7 @@ vint16m1_t test_vsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m2_mu( @@ -2568,7 +2568,7 @@ vint16m1_t test_vsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsub_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m2_mu( @@ -2577,7 +2577,7 @@ vint16m2_t test_vsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m4_mu( @@ -2586,7 +2586,7 @@ vint16m2_t test_vsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t 
maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsub_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m4_mu( @@ -2595,7 +2595,7 @@ vint16m4_t test_vsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m8_mu( @@ -2604,7 +2604,7 @@ vint16m4_t test_vsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsub_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m8_mu( @@ -2613,7 +2613,7 @@ vint16m8_t test_vsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_mu( @@ -2622,7 +2622,7 @@ vint16m8_t test_vsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsub_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_mu( @@ -2631,7 +2631,7 @@ vint32mf2_t test_vsub_vv_i32mf2_mu(vbool64_t 
mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m1_mu( @@ -2640,7 +2640,7 @@ vint32mf2_t test_vsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsub_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m1_mu( @@ -2649,7 +2649,7 @@ vint32m1_t test_vsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m2_mu( @@ -2658,7 +2658,7 @@ vint32m1_t test_vsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsub_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m2_mu( @@ -2667,7 +2667,7 @@ vint32m2_t test_vsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m2_mu(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m4_mu( @@ -2676,7 +2676,7 @@ vint32m2_t test_vsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsub_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m4_mu( @@ -2685,7 +2685,7 @@ vint32m4_t test_vsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m8_mu( @@ -2694,7 +2694,7 @@ vint32m4_t test_vsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsub_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m8_mu( @@ -2703,7 +2703,7 @@ vint32m8_t test_vsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m1_mu( @@ -2712,7 +2712,7 @@ vint32m8_t test_vsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, 
size_t vl) { - return vsub_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m1_mu( @@ -2721,7 +2721,7 @@ vint64m1_t test_vsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m2_mu( @@ -2730,7 +2730,7 @@ vint64m1_t test_vsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsub_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m2_mu( @@ -2739,7 +2739,7 @@ vint64m2_t test_vsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m4_mu( @@ -2748,7 +2748,7 @@ vint64m2_t test_vsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsub_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m4_mu( @@ -2757,7 +2757,7 @@ vint64m4_t test_vsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m8_mu( @@ -2766,7 +2766,7 @@ vint64m4_t test_vsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsub_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m8_mu( @@ -2775,7 +2775,7 @@ vint64m8_t test_vsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_mu( @@ -2784,7 +2784,7 @@ vint64m8_t test_vsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsub_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_mu( @@ -2793,7 +2793,7 @@ vuint8mf8_t test_vsub_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsub_vv_u8mf4_mu( @@ -2802,7 +2802,7 @@ vuint8mf8_t test_vsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsub_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_mu( @@ -2811,7 +2811,7 @@ vuint8mf4_t test_vsub_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_mu( @@ -2820,7 +2820,7 @@ vuint8mf4_t test_vsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vsub_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_mu( @@ -2829,7 +2829,7 @@ vuint8mf2_t test_vsub_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m1_mu( @@ -2838,7 +2838,7 @@ vuint8mf2_t test_vsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return 
vsub_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m1_mu( @@ -2847,7 +2847,7 @@ vuint8m1_t test_vsub_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m2_mu( @@ -2856,7 +2856,7 @@ vuint8m1_t test_vsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vsub_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m2_mu( @@ -2865,7 +2865,7 @@ vuint8m2_t test_vsub_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m4_mu( @@ -2874,7 +2874,7 @@ vuint8m2_t test_vsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vsub_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m4_mu( @@ -2883,7 +2883,7 @@ vuint8m4_t test_vsub_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u8m8_mu( @@ -2892,7 +2892,7 @@ vuint8m4_t test_vsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vsub_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u8m8_mu( @@ -2901,7 +2901,7 @@ vuint8m8_t test_vsub_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vsub_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_mu( @@ -2910,7 +2910,7 @@ vuint8m8_t test_vsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vsub_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_mu( @@ -2919,7 +2919,7 @@ vuint16mf4_t test_vsub_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_mu( @@ -2928,7 +2928,7 @@ 
vuint16mf4_t test_vsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vsub_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_mu( @@ -2937,7 +2937,7 @@ vuint16mf2_t test_vsub_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m1_mu( @@ -2946,7 +2946,7 @@ vuint16mf2_t test_vsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vsub_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m1_mu( @@ -2955,7 +2955,7 @@ vuint16m1_t test_vsub_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m2_mu( @@ -2964,7 +2964,7 @@ vuint16m1_t test_vsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vsub_vv_u16m2_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m2_mu( @@ -2973,7 +2973,7 @@ vuint16m2_t test_vsub_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m4_mu( @@ -2982,7 +2982,7 @@ vuint16m2_t test_vsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vsub_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m4_mu( @@ -2991,7 +2991,7 @@ vuint16m4_t test_vsub_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u16m8_mu( @@ -3000,7 +3000,7 @@ vuint16m4_t test_vsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vsub_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u16m8_mu( @@ -3009,7 +3009,7 @@ vuint16m8_t test_vsub_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t 
test_vsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vsub_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_mu( @@ -3018,7 +3018,7 @@ vuint16m8_t test_vsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vsub_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_mu( @@ -3027,7 +3027,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m1_mu( @@ -3036,7 +3036,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vsub_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m1_mu( @@ -3045,7 +3045,7 @@ vuint32m1_t test_vsub_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vsub_vv_u32m2_mu( @@ -3054,7 +3054,7 @@ vuint32m1_t test_vsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vsub_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m2_mu( @@ -3063,7 +3063,7 @@ vuint32m2_t test_vsub_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m4_mu( @@ -3072,7 +3072,7 @@ vuint32m2_t test_vsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vsub_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m4_mu( @@ -3081,7 +3081,7 @@ vuint32m4_t test_vsub_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u32m8_mu( @@ -3090,7 +3090,7 @@ vuint32m4_t test_vsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - 
return vsub_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u32m8_mu( @@ -3099,7 +3099,7 @@ vuint32m8_t test_vsub_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vsub_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m1_mu( @@ -3108,7 +3108,7 @@ vuint32m8_t test_vsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vsub_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m1_mu( @@ -3117,7 +3117,7 @@ vuint64m1_t test_vsub_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m2_mu( @@ -3126,7 +3126,7 @@ vuint64m1_t test_vsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vsub_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m2_mu( @@ -3135,7 +3135,7 @@ vuint64m2_t test_vsub_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m4_mu( @@ -3144,7 +3144,7 @@ vuint64m2_t test_vsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vsub_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m4_mu( @@ -3153,7 +3153,7 @@ vuint64m4_t test_vsub_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_u64m8_mu( @@ -3162,7 +3162,7 @@ vuint64m4_t test_vsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vsub_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_u64m8_mu( @@ -3171,6 +3171,6 @@ vuint64m8_t test_vsub_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vsub_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vsub_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwadd.c index 241bad2e84ca..ceb621348cf5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwadd.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_tu( @@ -21,7 +21,7 @@ vint16mf4_t test_vwadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_tu( @@ -30,7 +30,7 @@ vint16mf4_t test_vwadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_wv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_tu( @@ -39,7 +39,7 @@ vint16mf4_t test_vwadd_wv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_tu( @@ -48,7 +48,7 @@ vint16mf4_t test_vwadd_wx_i16mf4_tu(vint16mf4_t maskedoff, 
vint16mf4_t op1, int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_tu( @@ -57,7 +57,7 @@ vint16mf2_t test_vwadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_tu( @@ -66,7 +66,7 @@ vint16mf2_t test_vwadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_wv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_tu( @@ -75,7 +75,7 @@ vint16mf2_t test_vwadd_wv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_tu( @@ -84,7 +84,7 @@ vint16mf2_t test_vwadd_wx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_tu( @@ -93,7 +93,7 @@ vint16m1_t test_vwadd_vv_i16m1_tu(vint16m1_t 
maskedoff, vint8mf2_t op1, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_tu( @@ -102,7 +102,7 @@ vint16m1_t test_vwadd_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_wv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_tu( @@ -111,7 +111,7 @@ vint16m1_t test_vwadd_wv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_tu( @@ -120,7 +120,7 @@ vint16m1_t test_vwadd_wx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwadd_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_tu( @@ -129,7 +129,7 @@ vint16m2_t test_vwadd_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_tu( @@ -138,7 +138,7 @@ vint16m2_t test_vwadd_vx_i16m2_tu(vint16m2_t maskedoff, 
vint8m1_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwadd_wv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_tu( @@ -147,7 +147,7 @@ vint16m2_t test_vwadd_wv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_tu( @@ -156,7 +156,7 @@ vint16m2_t test_vwadd_wx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwadd_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_tu( @@ -165,7 +165,7 @@ vint16m4_t test_vwadd_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_tu( @@ -174,7 +174,7 @@ vint16m4_t test_vwadd_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwadd_wv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_tu( @@ -183,7 +183,7 @@ vint16m4_t test_vwadd_wv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t 
op1, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_tu( @@ -192,7 +192,7 @@ vint16m4_t test_vwadd_wx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwadd_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_tu( @@ -201,7 +201,7 @@ vint16m8_t test_vwadd_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_tu( @@ -210,7 +210,7 @@ vint16m8_t test_vwadd_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwadd_wv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_tu( @@ -219,7 +219,7 @@ vint16m8_t test_vwadd_wv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_tu( @@ -228,7 +228,7 @@ vint16m8_t test_vwadd_wx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int8_t o 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_tu( @@ -237,7 +237,7 @@ vint32mf2_t test_vwadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_tu( @@ -246,7 +246,7 @@ vint32mf2_t test_vwadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_wv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vwadd_wv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vwadd_wx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vwadd_vv_i32m1_tu(vint32m1_t maskedoff, 
vint16mf2_t op1, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vwadd_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_wv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_tu( @@ -291,7 +291,7 @@ vint32m1_t test_vwadd_wv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_tu( @@ -300,7 +300,7 @@ vint32m1_t test_vwadd_wx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwadd_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_tu( @@ -309,7 +309,7 @@ vint32m2_t test_vwadd_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_tu( @@ -318,7 +318,7 @@ vint32m2_t test_vwadd_vx_i32m2_tu(vint32m2_t maskedoff, 
vint16m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwadd_wv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_tu( @@ -327,7 +327,7 @@ vint32m2_t test_vwadd_wv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_tu( @@ -336,7 +336,7 @@ vint32m2_t test_vwadd_wx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwadd_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_tu( @@ -345,7 +345,7 @@ vint32m4_t test_vwadd_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_tu( @@ -354,7 +354,7 @@ vint32m4_t test_vwadd_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwadd_wv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_tu( @@ -363,7 +363,7 @@ vint32m4_t test_vwadd_wv_i32m4_tu(vint32m4_t maskedoff, 
vint32m4_t op1, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_tu( @@ -372,7 +372,7 @@ vint32m4_t test_vwadd_wx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwadd_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_tu( @@ -381,7 +381,7 @@ vint32m8_t test_vwadd_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_tu( @@ -390,7 +390,7 @@ vint32m8_t test_vwadd_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwadd_wv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_tu( @@ -399,7 +399,7 @@ vint32m8_t test_vwadd_wv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tu( @@ -408,7 +408,7 @@ vint32m8_t test_vwadd_wx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t 
op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tu( @@ -417,7 +417,7 @@ vint64m1_t test_vwadd_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tu( @@ -426,7 +426,7 @@ vint64m1_t test_vwadd_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_wv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tu( @@ -435,7 +435,7 @@ vint64m1_t test_vwadd_wv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_tu( @@ -444,7 +444,7 @@ vint64m1_t test_vwadd_wx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwadd_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_tu( @@ -453,7 +453,7 @@ vint64m2_t test_vwadd_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t 
op1, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_tu( @@ -462,7 +462,7 @@ vint64m2_t test_vwadd_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwadd_wv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_tu( @@ -471,7 +471,7 @@ vint64m2_t test_vwadd_wv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_tu( @@ -480,7 +480,7 @@ vint64m2_t test_vwadd_wx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwadd_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_tu( @@ -489,7 +489,7 @@ vint64m4_t test_vwadd_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_tu( @@ -498,7 +498,7 @@ vint64m4_t test_vwadd_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, 
int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwadd_wv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_tu( @@ -507,7 +507,7 @@ vint64m4_t test_vwadd_wv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_tu( @@ -516,7 +516,7 @@ vint64m4_t test_vwadd_wx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwadd_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_tu( @@ -525,7 +525,7 @@ vint64m8_t test_vwadd_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_tu( @@ -534,7 +534,7 @@ vint64m8_t test_vwadd_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwadd_wv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_tu( @@ -543,7 +543,7 @@ vint64m8_t test_vwadd_wv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint32m4 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_tum( @@ -552,7 +552,7 @@ vint64m8_t test_vwadd_wx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_tum( @@ -561,7 +561,7 @@ vint16mf4_t test_vwadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_tum( @@ -570,7 +570,7 @@ vint16mf4_t test_vwadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_wv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_tum( @@ -579,7 +579,7 @@ vint16mf4_t test_vwadd_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_tum( @@ -588,7 +588,7 @@ vint16mf4_t test_vwadd_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_tum( @@ -597,7 +597,7 @@ vint16mf2_t test_vwadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_tum( @@ -606,7 +606,7 @@ vint16mf2_t test_vwadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_wv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_tum( @@ -615,7 +615,7 @@ vint16mf2_t test_vwadd_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_tum( @@ -624,7 +624,7 @@ vint16mf2_t test_vwadd_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, 
vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_tum( @@ -633,7 +633,7 @@ vint16m1_t test_vwadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_tum( @@ -642,7 +642,7 @@ vint16m1_t test_vwadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_wv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_tum( @@ -651,7 +651,7 @@ vint16m1_t test_vwadd_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_tum( @@ -660,7 +660,7 @@ vint16m1_t test_vwadd_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwadd_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_tum( @@ -669,7 +669,7 @@ vint16m2_t 
test_vwadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_tum( @@ -678,7 +678,7 @@ vint16m2_t test_vwadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwadd_wv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_tum( @@ -687,7 +687,7 @@ vint16m2_t test_vwadd_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_tum( @@ -696,7 +696,7 @@ vint16m2_t test_vwadd_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwadd_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_tum( @@ -705,7 +705,7 @@ vint16m4_t test_vwadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwadd_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_tum( @@ -714,7 +714,7 @@ vint16m4_t test_vwadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwadd_wv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_tum( @@ -723,7 +723,7 @@ vint16m4_t test_vwadd_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_tum( @@ -732,7 +732,7 @@ vint16m4_t test_vwadd_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwadd_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_tum( @@ -741,7 +741,7 @@ vint16m8_t test_vwadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_tum( @@ -750,7 +750,7 @@ vint16m8_t test_vwadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vwadd_wv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwadd_wv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_tum( @@ -759,7 +759,7 @@ vint16m8_t test_vwadd_wv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_tum( @@ -768,7 +768,7 @@ vint16m8_t test_vwadd_wx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_tum( @@ -777,7 +777,7 @@ vint32mf2_t test_vwadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_tum( @@ -786,7 +786,7 @@ vint32mf2_t test_vwadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_wv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwadd_wx_i32mf2_tum( @@ -795,7 +795,7 @@ vint32mf2_t test_vwadd_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_tum( @@ -804,7 +804,7 @@ vint32mf2_t test_vwadd_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_tum( @@ -813,7 +813,7 @@ vint32m1_t test_vwadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_tum( @@ -822,7 +822,7 @@ vint32m1_t test_vwadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_wv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_tum( @@ -831,7 +831,7 @@ vint32m1_t test_vwadd_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, 
size_t vl) { - return vwadd_wx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_tum( @@ -840,7 +840,7 @@ vint32m1_t test_vwadd_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwadd_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_tum( @@ -849,7 +849,7 @@ vint32m2_t test_vwadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_tum( @@ -858,7 +858,7 @@ vint32m2_t test_vwadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwadd_wv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_tum( @@ -867,7 +867,7 @@ vint32m2_t test_vwadd_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_tum( @@ -876,7 +876,7 @@ vint32m2_t test_vwadd_wx_i32m2_tum(vbool16_t mask, vint32m2_t 
maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwadd_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_tum( @@ -885,7 +885,7 @@ vint32m4_t test_vwadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_tum( @@ -894,7 +894,7 @@ vint32m4_t test_vwadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwadd_wv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_tum( @@ -903,7 +903,7 @@ vint32m4_t test_vwadd_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_tum( @@ -912,7 +912,7 @@ vint32m4_t test_vwadd_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwadd_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m8_tum(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_tum( @@ -921,7 +921,7 @@ vint32m8_t test_vwadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_tum( @@ -930,7 +930,7 @@ vint32m8_t test_vwadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwadd_wv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_tum( @@ -939,7 +939,7 @@ vint32m8_t test_vwadd_wv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tum( @@ -948,7 +948,7 @@ vint32m8_t test_vwadd_wx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tum( @@ -957,7 +957,7 @@ vint64m1_t test_vwadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t 
maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tum( @@ -966,7 +966,7 @@ vint64m1_t test_vwadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_wv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tum( @@ -975,7 +975,7 @@ vint64m1_t test_vwadd_wv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_tum( @@ -984,7 +984,7 @@ vint64m1_t test_vwadd_wx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwadd_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_tum( @@ -993,7 +993,7 @@ vint64m2_t test_vwadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_tum( @@ -1002,7 +1002,7 @@ vint64m2_t 
test_vwadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwadd_wv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_tum( @@ -1011,7 +1011,7 @@ vint64m2_t test_vwadd_wv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_tum( @@ -1020,7 +1020,7 @@ vint64m2_t test_vwadd_wx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwadd_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_tum( @@ -1029,7 +1029,7 @@ vint64m4_t test_vwadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_tum( @@ -1038,7 +1038,7 @@ vint64m4_t test_vwadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwadd_wv_i64m4_tum(mask, maskedoff, 
op1, op2, vl); + return __riscv_vwadd_wv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_tum( @@ -1047,7 +1047,7 @@ vint64m4_t test_vwadd_wv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_tum( @@ -1056,7 +1056,7 @@ vint64m4_t test_vwadd_wx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwadd_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_tum( @@ -1065,7 +1065,7 @@ vint64m8_t test_vwadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_tum( @@ -1074,7 +1074,7 @@ vint64m8_t test_vwadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwadd_wv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_tum( @@ -1083,7 +1083,7 @@ vint64m8_t test_vwadd_wv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m8_t test_vwadd_wx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_tumu( @@ -1092,7 +1092,7 @@ vint64m8_t test_vwadd_wx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_tumu( @@ -1101,7 +1101,7 @@ vint16mf4_t test_vwadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_tumu( @@ -1110,7 +1110,7 @@ vint16mf4_t test_vwadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_wv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_tumu( @@ -1119,7 +1119,7 @@ vint16mf4_t test_vwadd_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16mf4_tumu(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_tumu( @@ -1128,7 +1128,7 @@ vint16mf4_t test_vwadd_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_tumu( @@ -1137,7 +1137,7 @@ vint16mf2_t test_vwadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_tumu( @@ -1146,7 +1146,7 @@ vint16mf2_t test_vwadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_wv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_tumu( @@ -1155,7 +1155,7 @@ vint16mf2_t test_vwadd_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_tumu( @@ -1164,7 +1164,7 @@ vint16mf2_t test_vwadd_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vwadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_tumu( @@ -1173,7 +1173,7 @@ vint16m1_t test_vwadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_tumu( @@ -1182,7 +1182,7 @@ vint16m1_t test_vwadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_wv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_tumu( @@ -1191,7 +1191,7 @@ vint16m1_t test_vwadd_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_tumu( @@ -1200,7 +1200,7 @@ vint16m1_t test_vwadd_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_tumu( @@ -1209,7 +1209,7 @@ vint16m2_t test_vwadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_tumu( @@ -1218,7 +1218,7 @@ vint16m2_t test_vwadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwadd_wv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_tumu( @@ -1227,7 +1227,7 @@ vint16m2_t test_vwadd_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_tumu( @@ -1236,7 +1236,7 @@ vint16m2_t test_vwadd_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_tumu( @@ -1245,7 +1245,7 @@ vint16m4_t test_vwadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, 
vint8m2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_tumu( @@ -1254,7 +1254,7 @@ vint16m4_t test_vwadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwadd_wv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_tumu( @@ -1263,7 +1263,7 @@ vint16m4_t test_vwadd_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_tumu( @@ -1272,7 +1272,7 @@ vint16m4_t test_vwadd_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_tumu( @@ -1281,7 +1281,7 @@ vint16m8_t test_vwadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_tumu( @@ -1290,7 +1290,7 @@ vint16m8_t 
test_vwadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwadd_wv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_tumu( @@ -1299,7 +1299,7 @@ vint16m8_t test_vwadd_wv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_tumu( @@ -1308,7 +1308,7 @@ vint16m8_t test_vwadd_wx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_tumu( @@ -1317,7 +1317,7 @@ vint32mf2_t test_vwadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_tumu( @@ -1326,7 +1326,7 @@ vint32mf2_t test_vwadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return 
vwadd_wv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_tumu( @@ -1335,7 +1335,7 @@ vint32mf2_t test_vwadd_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_tumu( @@ -1344,7 +1344,7 @@ vint32mf2_t test_vwadd_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_tumu( @@ -1353,7 +1353,7 @@ vint32m1_t test_vwadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_tumu( @@ -1362,7 +1362,7 @@ vint32m1_t test_vwadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_wv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_tumu( @@ -1371,7 +1371,7 @@ vint32m1_t test_vwadd_wv_i32m1_tumu(vbool32_t 
mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_tumu( @@ -1380,7 +1380,7 @@ vint32m1_t test_vwadd_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_tumu( @@ -1389,7 +1389,7 @@ vint32m2_t test_vwadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_tumu( @@ -1398,7 +1398,7 @@ vint32m2_t test_vwadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwadd_wv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_tumu( @@ -1407,7 +1407,7 @@ vint32m2_t test_vwadd_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vwadd_wx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_tumu( @@ -1416,7 +1416,7 @@ vint32m2_t test_vwadd_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_tumu( @@ -1425,7 +1425,7 @@ vint32m4_t test_vwadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_tumu( @@ -1434,7 +1434,7 @@ vint32m4_t test_vwadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwadd_wv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_tumu( @@ -1443,7 +1443,7 @@ vint32m4_t test_vwadd_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_tumu( @@ -1452,7 +1452,7 @@ vint32m4_t test_vwadd_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m8_t test_vwadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_tumu( @@ -1461,7 +1461,7 @@ vint32m8_t test_vwadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_tumu( @@ -1470,7 +1470,7 @@ vint32m8_t test_vwadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwadd_wv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_tumu( @@ -1479,7 +1479,7 @@ vint32m8_t test_vwadd_wv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_tumu( @@ -1488,7 +1488,7 @@ vint32m8_t test_vwadd_wx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_tumu( @@ -1497,7 +1497,7 @@ vint64m1_t test_vwadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_tumu( @@ -1506,7 +1506,7 @@ vint64m1_t test_vwadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_wv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_tumu( @@ -1515,7 +1515,7 @@ vint64m1_t test_vwadd_wv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_tumu( @@ -1524,7 +1524,7 @@ vint64m1_t test_vwadd_wx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_tumu( @@ -1533,7 +1533,7 @@ vint64m2_t test_vwadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t 
maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_tumu( @@ -1542,7 +1542,7 @@ vint64m2_t test_vwadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwadd_wv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_tumu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vwadd_wv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_tumu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vwadd_wx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwadd_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_tumu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vwadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_tumu( @@ -1578,7 +1578,7 
@@ vint64m4_t test_vwadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwadd_wv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_tumu( @@ -1587,7 +1587,7 @@ vint64m4_t test_vwadd_wv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_tumu( @@ -1596,7 +1596,7 @@ vint64m4_t test_vwadd_wx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_tumu( @@ -1605,7 +1605,7 @@ vint64m8_t test_vwadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_tumu( @@ -1614,7 +1614,7 @@ vint64m8_t test_vwadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return 
vwadd_wv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_tumu( @@ -1623,7 +1623,7 @@ vint64m8_t test_vwadd_wv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_mu( @@ -1632,7 +1632,7 @@ vint64m8_t test_vwadd_wx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_mu( @@ -1641,7 +1641,7 @@ vint16mf4_t test_vwadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_mu( @@ -1650,7 +1650,7 @@ vint16mf4_t test_vwadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_wv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_mu( @@ -1659,7 +1659,7 @@ vint16mf4_t test_vwadd_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t 
maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_mu( @@ -1668,7 +1668,7 @@ vint16mf4_t test_vwadd_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_mu( @@ -1677,7 +1677,7 @@ vint16mf2_t test_vwadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_mu( @@ -1686,7 +1686,7 @@ vint16mf2_t test_vwadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_wv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_mu( @@ -1695,7 +1695,7 @@ vint16mf2_t test_vwadd_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwadd_wx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_mu( @@ -1704,7 +1704,7 @@ vint16mf2_t test_vwadd_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_mu( @@ -1713,7 +1713,7 @@ vint16m1_t test_vwadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_mu( @@ -1722,7 +1722,7 @@ vint16m1_t test_vwadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_wv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_mu( @@ -1731,7 +1731,7 @@ vint16m1_t test_vwadd_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_mu( @@ -1740,7 +1740,7 @@ vint16m1_t test_vwadd_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vwadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwadd_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_mu( @@ -1749,7 +1749,7 @@ vint16m2_t test_vwadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_mu( @@ -1758,7 +1758,7 @@ vint16m2_t test_vwadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwadd_wv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_mu( @@ -1767,7 +1767,7 @@ vint16m2_t test_vwadd_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_mu( @@ -1776,7 +1776,7 @@ vint16m2_t test_vwadd_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwadd_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_mu( @@ -1785,7 +1785,7 
@@ vint16m4_t test_vwadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_mu( @@ -1794,7 +1794,7 @@ vint16m4_t test_vwadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwadd_wv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_mu( @@ -1803,7 +1803,7 @@ vint16m4_t test_vwadd_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_mu( @@ -1812,7 +1812,7 @@ vint16m4_t test_vwadd_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_mu( @@ -1821,7 +1821,7 @@ vint16m8_t test_vwadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwadd_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vwadd_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_mu( @@ -1830,7 +1830,7 @@ vint16m8_t test_vwadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwadd_wv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_mu( @@ -1839,7 +1839,7 @@ vint16m8_t test_vwadd_wv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return vwadd_wx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_mu( @@ -1848,7 +1848,7 @@ vint16m8_t test_vwadd_wx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_mu( @@ -1857,7 +1857,7 @@ vint32mf2_t test_vwadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_mu( @@ -1866,7 +1866,7 @@ vint32mf2_t test_vwadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vwadd_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwadd_wv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_mu( @@ -1875,7 +1875,7 @@ vint32mf2_t test_vwadd_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_mu( @@ -1884,7 +1884,7 @@ vint32mf2_t test_vwadd_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_mu( @@ -1893,7 +1893,7 @@ vint32m1_t test_vwadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_mu( @@ -1902,7 +1902,7 @@ vint32m1_t test_vwadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwadd_wv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwadd_wx_i32m1_mu( @@ -1911,7 +1911,7 @@ vint32m1_t test_vwadd_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_mu( @@ -1920,7 +1920,7 @@ vint32m1_t test_vwadd_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwadd_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_mu( @@ -1929,7 +1929,7 @@ vint32m2_t test_vwadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_mu( @@ -1938,7 +1938,7 @@ vint32m2_t test_vwadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwadd_wv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_mu( @@ -1947,7 +1947,7 @@ vint32m2_t test_vwadd_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - 
return vwadd_wx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_mu( @@ -1956,7 +1956,7 @@ vint32m2_t test_vwadd_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwadd_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_mu( @@ -1965,7 +1965,7 @@ vint32m4_t test_vwadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_mu( @@ -1974,7 +1974,7 @@ vint32m4_t test_vwadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwadd_wv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_mu( @@ -1983,7 +1983,7 @@ vint32m4_t test_vwadd_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_mu( @@ -1992,7 +1992,7 @@ vint32m4_t test_vwadd_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_mu( @@ -2001,7 +2001,7 @@ vint32m8_t test_vwadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwadd_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_mu( @@ -2010,7 +2010,7 @@ vint32m8_t test_vwadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwadd_wv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_mu( @@ -2019,7 +2019,7 @@ vint32m8_t test_vwadd_wv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return vwadd_wx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_mu( @@ -2028,7 +2028,7 @@ vint32m8_t test_vwadd_wx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_mu( @@ -2037,7 +2037,7 @@ vint64m1_t test_vwadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_mu( @@ -2046,7 +2046,7 @@ vint64m1_t test_vwadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_wv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_mu( @@ -2055,7 +2055,7 @@ vint64m1_t test_vwadd_wv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_mu( @@ -2064,7 +2064,7 @@ vint64m1_t test_vwadd_wx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwadd_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_mu( @@ -2073,7 +2073,7 @@ vint64m2_t test_vwadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, 
size_t vl) { - return vwadd_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_mu( @@ -2082,7 +2082,7 @@ vint64m2_t test_vwadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwadd_wv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_mu( @@ -2091,7 +2091,7 @@ vint64m2_t test_vwadd_wv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_mu( @@ -2100,7 +2100,7 @@ vint64m2_t test_vwadd_wx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwadd_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_mu( @@ -2109,7 +2109,7 @@ vint64m4_t test_vwadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_mu( @@ -2118,7 +2118,7 @@ vint64m4_t test_vwadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, 
vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwadd_wv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_mu( @@ -2127,7 +2127,7 @@ vint64m4_t test_vwadd_wv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_mu( @@ -2136,7 +2136,7 @@ vint64m4_t test_vwadd_wx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_mu( @@ -2145,7 +2145,7 @@ vint64m8_t test_vwadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwadd_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_mu( @@ -2154,7 +2154,7 @@ vint64m8_t test_vwadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwadd_wv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wv_i64m8_mu(mask, maskedoff, op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_mu( @@ -2163,6 +2163,6 @@ vint64m8_t test_vwadd_wv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return vwadd_wx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwadd_wx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwaddu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwaddu.c index 2ce489f7c644..42e2ac7af0ef 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwaddu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwaddu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_tu( @@ -21,7 +21,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_tu( @@ -30,7 +30,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_wv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_tu( @@ -39,7 
+39,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_tu( @@ -48,7 +48,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_tu( @@ -57,7 +57,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_tu( @@ -66,7 +66,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_wv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_tu( @@ -75,7 +75,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16mf2_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_tu( @@ -84,7 +84,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_tu( @@ -93,7 +93,7 @@ vuint16m1_t test_vwaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_tu( @@ -102,7 +102,7 @@ vuint16m1_t test_vwaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_wv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_tu( @@ -111,7 +111,7 @@ vuint16m1_t test_vwaddu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_tu( @@ -120,7 +120,7 @@ vuint16m1_t test_vwaddu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return 
__riscv_vwaddu_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_tu( @@ -129,7 +129,7 @@ vuint16m2_t test_vwaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_tu( @@ -138,7 +138,7 @@ vuint16m2_t test_vwaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_wv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_tu( @@ -147,7 +147,7 @@ vuint16m2_t test_vwaddu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_tu( @@ -156,7 +156,7 @@ vuint16m2_t test_vwaddu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_tu( @@ -165,7 +165,7 @@ vuint16m4_t test_vwaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m4_tu(maskedoff, 
op1, op2, vl); + return __riscv_vwaddu_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_tu( @@ -174,7 +174,7 @@ vuint16m4_t test_vwaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_wv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_tu( @@ -183,7 +183,7 @@ vuint16m4_t test_vwaddu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_tu( @@ -192,7 +192,7 @@ vuint16m4_t test_vwaddu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_tu( @@ -201,7 +201,7 @@ vuint16m8_t test_vwaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_tu( @@ -210,7 +210,7 @@ vuint16m8_t test_vwaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return 
vwaddu_wv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_tu( @@ -219,7 +219,7 @@ vuint16m8_t test_vwaddu_wv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_tu( @@ -228,7 +228,7 @@ vuint16m8_t test_vwaddu_wx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_tu( @@ -237,7 +237,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_tu( @@ -246,7 +246,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_wv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_tu( @@ -255,7 +255,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2_tu(vuint32mf2_t 
maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_tu( @@ -264,7 +264,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_tu( @@ -273,7 +273,7 @@ vuint32m1_t test_vwaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_tu( @@ -282,7 +282,7 @@ vuint32m1_t test_vwaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_wv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_tu( @@ -291,7 +291,7 @@ vuint32m1_t test_vwaddu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_tu( @@ -300,7 +300,7 @@ vuint32m1_t test_vwaddu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m2_t test_vwaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_tu( @@ -309,7 +309,7 @@ vuint32m2_t test_vwaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_tu( @@ -318,7 +318,7 @@ vuint32m2_t test_vwaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_wv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_tu( @@ -327,7 +327,7 @@ vuint32m2_t test_vwaddu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_tu( @@ -336,7 +336,7 @@ vuint32m2_t test_vwaddu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_tu( @@ -345,7 +345,7 @@ vuint32m4_t test_vwaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuin 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_tu( @@ -354,7 +354,7 @@ vuint32m4_t test_vwaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_wv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_tu( @@ -363,7 +363,7 @@ vuint32m4_t test_vwaddu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_tu( @@ -372,7 +372,7 @@ vuint32m4_t test_vwaddu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_tu( @@ -381,7 +381,7 @@ vuint32m8_t test_vwaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_tu( @@ -390,7 +390,7 @@ vuint32m8_t test_vwaddu_vx_u32m8_tu(vuint32m8_t 
maskedoff, vuint16m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_wv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_tu( @@ -399,7 +399,7 @@ vuint32m8_t test_vwaddu_wv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tu( @@ -408,7 +408,7 @@ vuint32m8_t test_vwaddu_wx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tu( @@ -417,7 +417,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tu( @@ -426,7 +426,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_wv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tu( @@ -435,7 +435,7 @@ 
vuint64m1_t test_vwaddu_wv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_tu( @@ -444,7 +444,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_tu( @@ -453,7 +453,7 @@ vuint64m2_t test_vwaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_tu( @@ -462,7 +462,7 @@ vuint64m2_t test_vwaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_wv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_tu( @@ -471,7 +471,7 @@ vuint64m2_t test_vwaddu_wv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwaddu_vv_u64m4_tu( @@ -480,7 +480,7 @@ vuint64m2_t test_vwaddu_wx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_tu( @@ -489,7 +489,7 @@ vuint64m4_t test_vwaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_tu( @@ -498,7 +498,7 @@ vuint64m4_t test_vwaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_wv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_tu( @@ -507,7 +507,7 @@ vuint64m4_t test_vwaddu_wv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_tu( @@ -516,7 +516,7 @@ vuint64m4_t test_vwaddu_wx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m8_tu(maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_tu( @@ -525,7 +525,7 @@ vuint64m8_t test_vwaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_tu( @@ -534,7 +534,7 @@ vuint64m8_t test_vwaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_wv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_tu( @@ -543,7 +543,7 @@ vuint64m8_t test_vwaddu_wv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_tum( @@ -552,7 +552,7 @@ vuint64m8_t test_vwaddu_wx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_tum( @@ -561,7 +561,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return 
vwaddu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_tum( @@ -570,7 +570,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_wv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_tum( @@ -579,7 +579,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_tum( @@ -588,7 +588,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_tum( @@ -597,7 +597,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_tum( @@ -606,7 +606,7 @@ vuint16mf2_t 
test_vwaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_wv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_tum( @@ -615,7 +615,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_tum( @@ -624,7 +624,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_tum( @@ -633,7 +633,7 @@ vuint16m1_t test_vwaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_tum( @@ -642,7 +642,7 @@ vuint16m1_t test_vwaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - 
return vwaddu_wv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_tum( @@ -651,7 +651,7 @@ vuint16m1_t test_vwaddu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_tum( @@ -660,7 +660,7 @@ vuint16m1_t test_vwaddu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_tum( @@ -669,7 +669,7 @@ vuint16m2_t test_vwaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_tum( @@ -678,7 +678,7 @@ vuint16m2_t test_vwaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_wv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_tum( @@ -687,7 +687,7 @@ vuint16m2_t test_vwaddu_wv_u16m2_tum(vbool8_t mask, 
vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_tum( @@ -696,7 +696,7 @@ vuint16m2_t test_vwaddu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_tum( @@ -705,7 +705,7 @@ vuint16m4_t test_vwaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_tum( @@ -714,7 +714,7 @@ vuint16m4_t test_vwaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_wv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_tum( @@ -723,7 +723,7 @@ vuint16m4_t test_vwaddu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwaddu_wx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_tum( @@ -732,7 +732,7 @@ vuint16m4_t test_vwaddu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_tum( @@ -741,7 +741,7 @@ vuint16m8_t test_vwaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_tum( @@ -750,7 +750,7 @@ vuint16m8_t test_vwaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_wv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_tum( @@ -759,7 +759,7 @@ vuint16m8_t test_vwaddu_wv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_tum( @@ -768,7 +768,7 @@ vuint16m8_t test_vwaddu_wx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32mf2_t test_vwaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_tum( @@ -777,7 +777,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_tum( @@ -786,7 +786,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_wv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_tum( @@ -795,7 +795,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_tum( @@ -804,7 +804,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwaddu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_tum( @@ -813,7 +813,7 @@ vuint32m1_t test_vwaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_tum( @@ -822,7 +822,7 @@ vuint32m1_t test_vwaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_wv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_tum( @@ -831,7 +831,7 @@ vuint32m1_t test_vwaddu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_tum( @@ -840,7 +840,7 @@ vuint32m1_t test_vwaddu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_tum( @@ -849,7 +849,7 @@ vuint32m2_t test_vwaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint32m2_t test_vwaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_tum( @@ -858,7 +858,7 @@ vuint32m2_t test_vwaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_wv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_tum( @@ -867,7 +867,7 @@ vuint32m2_t test_vwaddu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_tum( @@ -876,7 +876,7 @@ vuint32m2_t test_vwaddu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_tum( @@ -885,7 +885,7 @@ vuint32m4_t test_vwaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m4_tum(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_tum( @@ -894,7 +894,7 @@ vuint32m4_t test_vwaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_wv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_tum( @@ -903,7 +903,7 @@ vuint32m4_t test_vwaddu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_tum( @@ -912,7 +912,7 @@ vuint32m4_t test_vwaddu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_tum( @@ -921,7 +921,7 @@ vuint32m8_t test_vwaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_tum( @@ -930,7 +930,7 @@ vuint32m8_t test_vwaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8_tum(vbool4_t mask, 
vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_wv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_tum( @@ -939,7 +939,7 @@ vuint32m8_t test_vwaddu_wv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tum( @@ -948,7 +948,7 @@ vuint32m8_t test_vwaddu_wx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tum( @@ -957,7 +957,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tum( @@ -966,7 +966,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_wv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwaddu_wx_u64m1_tum( @@ -975,7 +975,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_tum( @@ -984,7 +984,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_tum( @@ -993,7 +993,7 @@ vuint64m2_t test_vwaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_tum( @@ -1002,7 +1002,7 @@ vuint64m2_t test_vwaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_wv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_tum( @@ -1011,7 +1011,7 @@ vuint64m2_t test_vwaddu_wv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, 
vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_tum( @@ -1020,7 +1020,7 @@ vuint64m2_t test_vwaddu_wx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_tum( @@ -1029,7 +1029,7 @@ vuint64m4_t test_vwaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_tum( @@ -1038,7 +1038,7 @@ vuint64m4_t test_vwaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_wv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_tum( @@ -1047,7 +1047,7 @@ vuint64m4_t test_vwaddu_wv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_tum( @@ -1056,7 
+1056,7 @@ vuint64m4_t test_vwaddu_wx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_tum( @@ -1065,7 +1065,7 @@ vuint64m8_t test_vwaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_tum( @@ -1074,7 +1074,7 @@ vuint64m8_t test_vwaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_wv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_tum( @@ -1083,7 +1083,7 @@ vuint64m8_t test_vwaddu_wv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_tumu( @@ -1092,7 +1092,7 @@ vuint64m8_t test_vwaddu_wx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t 
op2, size_t vl) { - return vwaddu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_tumu( @@ -1101,7 +1101,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_tumu( @@ -1110,7 +1110,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_wv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_tumu( @@ -1119,7 +1119,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_tumu( @@ -1128,7 +1128,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwaddu_vx_u16mf2_tumu( @@ -1137,7 +1137,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_tumu( @@ -1146,7 +1146,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_wv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_tumu( @@ -1155,7 +1155,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_tumu( @@ -1164,7 +1164,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_tumu( @@ -1173,7 +1173,7 @@ vuint16m1_t test_vwaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vwaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_tumu( @@ -1182,7 +1182,7 @@ vuint16m1_t test_vwaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_wv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_tumu( @@ -1191,7 +1191,7 @@ vuint16m1_t test_vwaddu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_tumu( @@ -1200,7 +1200,7 @@ vuint16m1_t test_vwaddu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_tumu( @@ -1209,7 +1209,7 @@ vuint16m2_t test_vwaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m2_tumu(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_tumu( @@ -1218,7 +1218,7 @@ vuint16m2_t test_vwaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_wv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_tumu( @@ -1227,7 +1227,7 @@ vuint16m2_t test_vwaddu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_tumu( @@ -1236,7 +1236,7 @@ vuint16m2_t test_vwaddu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_tumu( @@ -1245,7 +1245,7 @@ vuint16m4_t test_vwaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_tumu( @@ -1254,7 +1254,7 @@ vuint16m4_t test_vwaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vwaddu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_wv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_tumu( @@ -1263,7 +1263,7 @@ vuint16m4_t test_vwaddu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_tumu( @@ -1272,7 +1272,7 @@ vuint16m4_t test_vwaddu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_tumu( @@ -1281,7 +1281,7 @@ vuint16m8_t test_vwaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_tumu( @@ -1290,7 +1290,7 @@ vuint16m8_t test_vwaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_wv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m8_tumu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_tumu( @@ -1299,7 +1299,7 @@ vuint16m8_t test_vwaddu_wv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_tumu( @@ -1308,7 +1308,7 @@ vuint16m8_t test_vwaddu_wx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_tumu( @@ -1317,7 +1317,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_tumu( @@ -1326,7 +1326,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_wv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_tumu( @@ -1335,7 +1335,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32mf2_t test_vwaddu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_tumu( @@ -1344,7 +1344,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_tumu( @@ -1353,7 +1353,7 @@ vuint32m1_t test_vwaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_tumu( @@ -1362,7 +1362,7 @@ vuint32m1_t test_vwaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_wv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_tumu( @@ -1371,7 +1371,7 @@ vuint32m1_t test_vwaddu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwaddu_wx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_tumu( @@ -1380,7 +1380,7 @@ vuint32m1_t test_vwaddu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_tumu( @@ -1389,7 +1389,7 @@ vuint32m2_t test_vwaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_tumu( @@ -1398,7 +1398,7 @@ vuint32m2_t test_vwaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_wv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_tumu( @@ -1407,7 +1407,7 @@ vuint32m2_t test_vwaddu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_tumu( @@ -1416,7 +1416,7 @@ vuint32m2_t test_vwaddu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_tumu( @@ -1425,7 +1425,7 @@ vuint32m4_t test_vwaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_tumu( @@ -1434,7 +1434,7 @@ vuint32m4_t test_vwaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_wv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_tumu( @@ -1443,7 +1443,7 @@ vuint32m4_t test_vwaddu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_tumu( @@ -1452,7 +1452,7 @@ vuint32m4_t test_vwaddu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vwaddu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_tumu( @@ -1461,7 +1461,7 @@ vuint32m8_t test_vwaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_tumu( @@ -1470,7 +1470,7 @@ vuint32m8_t test_vwaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_wv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_tumu( @@ -1479,7 +1479,7 @@ vuint32m8_t test_vwaddu_wv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_tumu( @@ -1488,7 +1488,7 @@ vuint32m8_t test_vwaddu_wx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_tumu( @@ -1497,7 +1497,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_tumu( @@ -1506,7 +1506,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_wv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_tumu( @@ -1515,7 +1515,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_tumu( @@ -1524,7 +1524,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_tumu( @@ -1533,7 +1533,7 @@ vuint64m2_t test_vwaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); 
+ return __riscv_vwaddu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_tumu( @@ -1542,7 +1542,7 @@ vuint64m2_t test_vwaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_wv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_tumu( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vwaddu_wv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_tumu( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vwaddu_wx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_tumu( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vwaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_tumu( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vwaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, 
vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_wv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_tumu( @@ -1587,7 +1587,7 @@ vuint64m4_t test_vwaddu_wv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_tumu( @@ -1596,7 +1596,7 @@ vuint64m4_t test_vwaddu_wx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_tumu( @@ -1605,7 +1605,7 @@ vuint64m8_t test_vwaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_tumu( @@ -1614,7 +1614,7 @@ vuint64m8_t test_vwaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_wv_u64m8_tumu(mask, maskedoff, op1, op2, 
vl); + return __riscv_vwaddu_wv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_tumu( @@ -1623,7 +1623,7 @@ vuint64m8_t test_vwaddu_wv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_mu( @@ -1632,7 +1632,7 @@ vuint64m8_t test_vwaddu_wx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_mu( @@ -1641,7 +1641,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_mu( @@ -1650,7 +1650,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_wv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_mu( @@ -1659,7 +1659,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_mu( @@ -1668,7 +1668,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_mu( @@ -1677,7 +1677,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_mu( @@ -1686,7 +1686,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_wv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_mu( @@ -1695,7 +1695,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwaddu_wx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_mu( @@ -1704,7 +1704,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_mu( @@ -1713,7 +1713,7 @@ vuint16m1_t test_vwaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_mu( @@ -1722,7 +1722,7 @@ vuint16m1_t test_vwaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_wv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_mu( @@ -1731,7 +1731,7 @@ vuint16m1_t test_vwaddu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_mu( @@ -1740,7 +1740,7 @@ vuint16m1_t test_vwaddu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m2_t test_vwaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_mu( @@ -1749,7 +1749,7 @@ vuint16m2_t test_vwaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_mu( @@ -1758,7 +1758,7 @@ vuint16m2_t test_vwaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_wv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_mu( @@ -1767,7 +1767,7 @@ vuint16m2_t test_vwaddu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_mu( @@ -1776,7 +1776,7 @@ vuint16m2_t test_vwaddu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_mu( @@ -1785,7 +1785,7 @@ vuint16m4_t test_vwaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_mu( @@ -1794,7 +1794,7 @@ vuint16m4_t test_vwaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_wv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_mu( @@ -1803,7 +1803,7 @@ vuint16m4_t test_vwaddu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_mu( @@ -1812,7 +1812,7 @@ vuint16m4_t test_vwaddu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_mu( @@ -1821,7 +1821,7 @@ vuint16m8_t test_vwaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, 
vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_mu( @@ -1830,7 +1830,7 @@ vuint16m8_t test_vwaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_wv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_mu( @@ -1839,7 +1839,7 @@ vuint16m8_t test_vwaddu_wv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_mu( @@ -1848,7 +1848,7 @@ vuint16m8_t test_vwaddu_wx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_mu( @@ -1857,7 +1857,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_mu( @@ -1866,7 +1866,7 @@ 
vuint32mf2_t test_vwaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_wv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_mu( @@ -1875,7 +1875,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_mu( @@ -1884,7 +1884,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_mu( @@ -1893,7 +1893,7 @@ vuint32m1_t test_vwaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_mu( @@ -1902,7 +1902,7 @@ vuint32m1_t test_vwaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t 
vl) { - return vwaddu_wv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_mu( @@ -1911,7 +1911,7 @@ vuint32m1_t test_vwaddu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_mu( @@ -1920,7 +1920,7 @@ vuint32m1_t test_vwaddu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_mu( @@ -1929,7 +1929,7 @@ vuint32m2_t test_vwaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_mu( @@ -1938,7 +1938,7 @@ vuint32m2_t test_vwaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_wv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_mu( @@ -1947,7 +1947,7 @@ vuint32m2_t test_vwaddu_wv_u32m2_mu(vbool16_t 
mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_mu( @@ -1956,7 +1956,7 @@ vuint32m2_t test_vwaddu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_mu( @@ -1965,7 +1965,7 @@ vuint32m4_t test_vwaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_mu( @@ -1974,7 +1974,7 @@ vuint32m4_t test_vwaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_wv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_mu( @@ -1983,7 +1983,7 @@ vuint32m4_t test_vwaddu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m4_mu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vwaddu_wx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_mu( @@ -1992,7 +1992,7 @@ vuint32m4_t test_vwaddu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_mu( @@ -2001,7 +2001,7 @@ vuint32m8_t test_vwaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_mu( @@ -2010,7 +2010,7 @@ vuint32m8_t test_vwaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_wv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_mu( @@ -2019,7 +2019,7 @@ vuint32m8_t test_vwaddu_wv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_mu( @@ -2028,7 +2028,7 @@ vuint32m8_t test_vwaddu_wx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m1_t test_vwaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_mu( @@ -2037,7 +2037,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_mu( @@ -2046,7 +2046,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_wv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_mu( @@ -2055,7 +2055,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_mu( @@ -2064,7 +2064,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_mu( @@ -2073,7 +2073,7 @@ vuint64m2_t test_vwaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_mu( @@ -2082,7 +2082,7 @@ vuint64m2_t test_vwaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_wv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_mu( @@ -2091,7 +2091,7 @@ vuint64m2_t test_vwaddu_wv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_mu( @@ -2100,7 +2100,7 @@ vuint64m2_t test_vwaddu_wx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_mu( @@ -2109,7 +2109,7 @@ vuint64m4_t test_vwaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t 
maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_mu( @@ -2118,7 +2118,7 @@ vuint64m4_t test_vwaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwaddu_wv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_mu( @@ -2127,7 +2127,7 @@ vuint64m4_t test_vwaddu_wv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_mu( @@ -2136,7 +2136,7 @@ vuint64m4_t test_vwaddu_wx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_mu( @@ -2145,7 +2145,7 @@ vuint64m8_t test_vwaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_mu( @@ -2154,7 +2154,7 @@ 
vuint64m8_t test_vwaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwaddu_wv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_mu( @@ -2163,6 +2163,6 @@ vuint64m8_t test_vwaddu_wv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwaddu_wx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwcvt.c index dbc85ca2bc2b..578b15dcabda 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwcvt.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vwcvt_x_x_v_i16mf4_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_tu( @@ -21,7 +21,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return vwcvt_x_x_v_i16mf2_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_tu( @@ -30,7 +30,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t src, 
si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return vwcvt_x_x_v_i16m1_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_tu( @@ -39,7 +39,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2_tu(vint16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vwcvt_x_x_v_i16m2_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_tu( @@ -48,7 +48,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_tu(vint16m2_t maskedoff, vint8m1_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4_tu(vint16m4_t maskedoff, vint8m2_t src, size_t vl) { - return vwcvt_x_x_v_i16m4_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_tu( @@ -57,7 +57,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_tu(vint16m4_t maskedoff, vint8m2_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8_tu(vint16m8_t maskedoff, vint8m4_t src, size_t vl) { - return vwcvt_x_x_v_i16m8_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_tu( @@ -66,7 +66,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_tu(vint16m8_t maskedoff, vint8m4_t src, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return vwcvt_x_x_v_i32mf2_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_tu( @@ -75,7 +75,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vwcvt_x_x_v_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vwcvt_x_x_v_i32m1_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_tu( @@ -84,7 +84,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2_tu(vint32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vwcvt_x_x_v_i32m2_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_tu( @@ -93,7 +93,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_tu(vint32m2_t maskedoff, vint16m1_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4_tu(vint32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vwcvt_x_x_v_i32m4_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_tu( @@ -102,7 +102,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_tu(vint32m4_t maskedoff, vint16m2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8_tu(vint32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vwcvt_x_x_v_i32m8_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tu( @@ -111,7 +111,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_tu(vint32m8_t maskedoff, vint16m4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vwcvt_x_x_v_i64m1_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_tu( @@ -120,7 +120,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t src, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2_tu(vint64m2_t maskedoff, vint32m1_t src, size_t 
vl) { - return vwcvt_x_x_v_i64m2_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_tu( @@ -129,7 +129,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_tu(vint64m2_t maskedoff, vint32m1_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4_tu(vint64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vwcvt_x_x_v_i64m4_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_tu( @@ -138,7 +138,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_tu(vint64m4_t maskedoff, vint32m2_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8_tu(vint64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vwcvt_x_x_v_i64m8_tu(maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_tum( @@ -147,7 +147,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8_tu(vint64m8_t maskedoff, vint32m4_t src, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vwcvt_x_x_v_i16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_tum( @@ -156,7 +156,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return vwcvt_x_x_v_i16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_tum( @@ -165,7 +165,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1_tum(vbool16_t mask, vint16m1_t 
maskedoff, vint8mf2_t src, size_t vl) { - return vwcvt_x_x_v_i16m1_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_tum( @@ -174,7 +174,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vwcvt_x_x_v_i16m2_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_tum( @@ -183,7 +183,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { - return vwcvt_x_x_v_i16m4_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_tum( @@ -192,7 +192,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { - return vwcvt_x_x_v_i16m8_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_tum( @@ -201,7 +201,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return vwcvt_x_x_v_i32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_tum( @@ -210,7 +210,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, v // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vwcvt_x_x_v_i32m1_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_tum( @@ -219,7 +219,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vwcvt_x_x_v_i32m2_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_tum( @@ -228,7 +228,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vwcvt_x_x_v_i32m4_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_tum( @@ -237,7 +237,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vwcvt_x_x_v_i32m8_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tum( @@ -246,7 +246,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vwcvt_x_x_v_i64m1_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_tum( @@ -255,7 
+255,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { - return vwcvt_x_x_v_i64m2_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_tum( @@ -264,7 +264,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vwcvt_x_x_v_i64m4_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_tum( @@ -273,7 +273,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vwcvt_x_x_v_i64m8_tum(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_tumu( @@ -282,7 +282,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vwcvt_x_x_v_i16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_tumu( @@ -291,7 +291,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return vwcvt_x_x_v_i16mf2_tumu(mask, maskedoff, src, vl); + return 
__riscv_vwcvt_x_x_v_i16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_tumu( @@ -300,7 +300,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return vwcvt_x_x_v_i16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_tumu( @@ -309,7 +309,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vwcvt_x_x_v_i16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_tumu( @@ -318,7 +318,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { - return vwcvt_x_x_v_i16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_tumu( @@ -327,7 +327,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { - return vwcvt_x_x_v_i16m8_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_tumu( @@ -336,7 +336,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t 
maskedoff, vint16mf4_t src, size_t vl) { - return vwcvt_x_x_v_i32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_tumu( @@ -345,7 +345,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vwcvt_x_x_v_i32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_tumu( @@ -354,7 +354,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vwcvt_x_x_v_i32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_tumu( @@ -363,7 +363,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vwcvt_x_x_v_i32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_tumu( @@ -372,7 +372,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vwcvt_x_x_v_i32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tumu( @@ -381,7 +381,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_tumu(vbool4_t mask, vint32m8_t 
maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vwcvt_x_x_v_i64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_tumu( @@ -390,7 +390,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { - return vwcvt_x_x_v_i64m2_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_tumu( @@ -399,7 +399,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vwcvt_x_x_v_i64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_tumu( @@ -408,7 +408,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vwcvt_x_x_v_i64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_mu( @@ -417,7 +417,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vwcvt_x_x_v_i16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
@test_vwcvt_x_x_v_i16mf2_mu( @@ -426,7 +426,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return vwcvt_x_x_v_i16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_mu( @@ -435,7 +435,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return vwcvt_x_x_v_i16m1_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_mu( @@ -444,7 +444,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vwcvt_x_x_v_i16m2_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_mu( @@ -453,7 +453,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { - return vwcvt_x_x_v_i16m4_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_mu( @@ -462,7 +462,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { - return vwcvt_x_x_v_i16m8_mu(mask, maskedoff, src, vl); + return 
__riscv_vwcvt_x_x_v_i16m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_mu( @@ -471,7 +471,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return vwcvt_x_x_v_i32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_mu( @@ -480,7 +480,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vwcvt_x_x_v_i32m1_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_mu( @@ -489,7 +489,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vwcvt_x_x_v_i32m2_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_mu( @@ -498,7 +498,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vwcvt_x_x_v_i32m4_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_mu( @@ -507,7 +507,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, 
size_t vl) { - return vwcvt_x_x_v_i32m8_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_mu( @@ -516,7 +516,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vwcvt_x_x_v_i64m1_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_mu( @@ -525,7 +525,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { - return vwcvt_x_x_v_i64m2_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_mu( @@ -534,7 +534,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vwcvt_x_x_v_i64m4_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_mu( @@ -543,6 +543,6 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vwcvt_x_x_v_i64m8_mu(mask, maskedoff, src, vl); + return __riscv_vwcvt_x_x_v_i64m8_mu(mask, maskedoff, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwcvtu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwcvtu.c index 
c564320e602e..ac4f23404013 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwcvtu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwcvtu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf4_tu(maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16mf4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_tu( @@ -21,7 +21,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf2_tu(maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_tu( @@ -30,7 +30,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t src // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m1_tu(maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_tu( @@ -39,7 +39,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vwcvtu_x_x_v_u16m2_tu(maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_tu( @@ -48,7 +48,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m4_tu(maskedoff, src, vl); + return 
__riscv_vwcvtu_x_x_v_u16m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_tu( @@ -57,7 +57,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return vwcvtu_x_x_v_u16m8_tu(maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_tu( @@ -66,7 +66,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t src, si // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u32mf2_tu(maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32mf2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_tu( @@ -75,7 +75,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t sr // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m1_tu(maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_tu( @@ -84,7 +84,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vwcvtu_x_x_v_u32m2_tu(maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_tu( @@ -93,7 +93,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m4_tu(maskedoff, src, vl); + return 
__riscv_vwcvtu_x_x_v_u32m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_tu( @@ -102,7 +102,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vwcvtu_x_x_v_u32m8_tu(maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tu( @@ -111,7 +111,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u64m1_tu(maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m1_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_tu( @@ -120,7 +120,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t src, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vwcvtu_x_x_v_u64m2_tu(maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m2_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_tu( @@ -129,7 +129,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vwcvtu_x_x_v_u64m4_tu(maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m4_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_tu( @@ -138,7 +138,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return vwcvtu_x_x_v_u64m8_tu(maskedoff, src, vl); + return 
__riscv_vwcvtu_x_x_v_u64m8_tu(maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_tum( @@ -147,7 +147,7 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t src, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf4_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16mf4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_tum( @@ -156,7 +156,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf2_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_tum( @@ -165,7 +165,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m1_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_tum( @@ -174,7 +174,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vwcvtu_x_x_v_u16m2_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_tum( @@ -183,7 +183,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4_tum(vbool4_t 
mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m4_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_tum( @@ -192,7 +192,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return vwcvtu_x_x_v_u16m8_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_tum( @@ -201,7 +201,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u32mf2_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32mf2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_tum( @@ -210,7 +210,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m1_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_tum( @@ -219,7 +219,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vwcvtu_x_x_v_u32m2_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_tum( @@ -228,7 +228,7 @@ vuint32m2_t 
test_vwcvtu_x_x_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m4_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_tum( @@ -237,7 +237,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vwcvtu_x_x_v_u32m8_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tum( @@ -246,7 +246,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u64m1_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m1_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_tum( @@ -255,7 +255,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vwcvtu_x_x_v_u64m2_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m2_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_tum( @@ -264,7 +264,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vwcvtu_x_x_v_u64m4_tum(mask, maskedoff, src, vl); + return 
__riscv_vwcvtu_x_x_v_u64m4_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_tum( @@ -273,7 +273,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return vwcvtu_x_x_v_u64m8_tum(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m8_tum(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_tumu( @@ -282,7 +282,7 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf4_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16mf4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_tumu( @@ -291,7 +291,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_tumu( @@ -300,7 +300,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m1_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_tumu( @@ -309,7 +309,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vwcvtu_x_x_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vwcvtu_x_x_v_u16m2_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_tumu( @@ -318,7 +318,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m4_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_tumu( @@ -327,7 +327,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return vwcvtu_x_x_v_u16m8_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_tumu( @@ -336,7 +336,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u32mf2_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32mf2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_tumu( @@ -345,7 +345,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m1_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_tumu( 
@@ -354,7 +354,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vwcvtu_x_x_v_u32m2_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_tumu( @@ -363,7 +363,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m4_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_tumu( @@ -372,7 +372,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vwcvtu_x_x_v_u32m8_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tumu( @@ -381,7 +381,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u64m1_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m1_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_tumu( @@ -390,7 +390,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vwcvtu_x_x_v_u64m2_tumu(mask, 
maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m2_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_tumu( @@ -399,7 +399,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vwcvtu_x_x_v_u64m4_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m4_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_tumu( @@ -408,7 +408,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return vwcvtu_x_x_v_u64m8_tumu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m8_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_mu( @@ -417,7 +417,7 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf4_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16mf4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_mu( @@ -426,7 +426,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u16mf2_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_mu( @@ -435,7 +435,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vwcvtu_x_x_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m1_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_mu( @@ -444,7 +444,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vwcvtu_x_x_v_u16m2_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_mu( @@ -453,7 +453,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return vwcvtu_x_x_v_u16m4_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_mu( @@ -462,7 +462,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return vwcvtu_x_x_v_u16m8_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u16m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_mu( @@ -471,7 +471,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vwcvtu_x_x_v_u32mf2_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32mf2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_mu( @@ -480,7 +480,7 @@ vuint32mf2_t 
test_vwcvtu_x_x_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m1_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m1_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_mu( @@ -489,7 +489,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vwcvtu_x_x_v_u32m2_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_mu( @@ -498,7 +498,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return vwcvtu_x_x_v_u32m4_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_mu( @@ -507,7 +507,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vwcvtu_x_x_v_u32m8_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u32m8_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_mu( @@ -516,7 +516,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vwcvtu_x_x_v_u64m1_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m1_mu(mask, 
maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_mu( @@ -525,7 +525,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vwcvtu_x_x_v_u64m2_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m2_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_mu( @@ -534,7 +534,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vwcvtu_x_x_v_u64m4_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m4_mu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_mu( @@ -543,6 +543,6 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return vwcvtu_x_x_v_u64m8_mu(mask, maskedoff, src, vl); + return __riscv_vwcvtu_x_x_v_u64m8_mu(mask, maskedoff, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmacc.c index cec2f1e694fb..bcb652e3df24 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmacc.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vwmacc_vv_i16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_tu( 
@@ -22,7 +22,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vwmacc_vx_i16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_tu( @@ -31,7 +31,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vwmacc_vv_i16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_tu( @@ -40,7 +40,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vwmacc_vx_i16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_tu( @@ -49,7 +49,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vwmacc_vv_i16m1_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_tu( @@ -58,7 +58,7 @@ vint16m1_t test_vwmacc_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vwmacc_vx_i16m1_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_tu( @@ -67,7 +67,7 @@ vint16m1_t test_vwmacc_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vint8mf2_t 
vs2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vwmacc_vv_i16m2_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_tu( @@ -76,7 +76,7 @@ vint16m2_t test_vwmacc_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vwmacc_vx_i16m2_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_tu( @@ -85,7 +85,7 @@ vint16m2_t test_vwmacc_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vwmacc_vv_i16m4_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_tu( @@ -94,7 +94,7 @@ vint16m4_t test_vwmacc_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vwmacc_vx_i16m4_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_tu( @@ -103,7 +103,7 @@ vint16m4_t test_vwmacc_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vwmacc_vv_i16m8_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_tu( @@ -112,7 +112,7 @@ vint16m8_t test_vwmacc_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, 
vint8m4_t vs2, size_t vl) { - return vwmacc_vx_i16m8_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_tu( @@ -121,7 +121,7 @@ vint16m8_t test_vwmacc_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vwmacc_vv_i32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_tu( @@ -130,7 +130,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vwmacc_vx_i32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_tu( @@ -139,7 +139,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vwmacc_vv_i32m1_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_tu( @@ -148,7 +148,7 @@ vint32m1_t test_vwmacc_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vwmacc_vx_i32m1_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_tu( @@ -157,7 +157,7 @@ vint32m1_t test_vwmacc_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vwmacc_vv_i32m2_tu(vd, vs1, vs2, 
vl); + return __riscv_vwmacc_vv_i32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_tu( @@ -166,7 +166,7 @@ vint32m2_t test_vwmacc_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vwmacc_vx_i32m2_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_tu( @@ -175,7 +175,7 @@ vint32m2_t test_vwmacc_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vwmacc_vv_i32m4_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_tu( @@ -184,7 +184,7 @@ vint32m4_t test_vwmacc_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vwmacc_vx_i32m4_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_tu( @@ -193,7 +193,7 @@ vint32m4_t test_vwmacc_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vwmacc_vv_i32m8_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_tu( @@ -202,7 +202,7 @@ vint32m8_t test_vwmacc_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vwmacc_vx_i32m8_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vwmacc_vv_i64m1_tu( @@ -211,7 +211,7 @@ vint32m8_t test_vwmacc_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vwmacc_vv_i64m1_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tu( @@ -220,7 +220,7 @@ vint64m1_t test_vwmacc_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vwmacc_vx_i64m1_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_tu( @@ -229,7 +229,7 @@ vint64m1_t test_vwmacc_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vwmacc_vv_i64m2_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_tu( @@ -238,7 +238,7 @@ vint64m2_t test_vwmacc_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vwmacc_vx_i64m2_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_tu( @@ -247,7 +247,7 @@ vint64m2_t test_vwmacc_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vwmacc_vv_i64m4_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_tu( @@ -256,7 +256,7 @@ vint64m4_t 
test_vwmacc_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vwmacc_vx_i64m4_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_tu( @@ -265,7 +265,7 @@ vint64m4_t test_vwmacc_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vwmacc_vv_i64m8_tu(vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_tu( @@ -274,7 +274,7 @@ vint64m8_t test_vwmacc_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vwmacc_vx_i64m8_tu(vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_tum( @@ -283,7 +283,7 @@ vint64m8_t test_vwmacc_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vwmacc_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_tum( @@ -292,7 +292,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vwmacc_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_tum( @@ -301,7 +301,7 @@ vint16mf4_t 
test_vwmacc_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vwmacc_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_tum( @@ -310,7 +310,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vwmacc_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_tum( @@ -319,7 +319,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vwmacc_vv_i16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_tum( @@ -328,7 +328,7 @@ vint16m1_t test_vwmacc_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vwmacc_vx_i16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_tum( @@ -337,7 +337,7 @@ vint16m1_t test_vwmacc_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vwmacc_vv_i16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m2_tum(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_tum( @@ -346,7 +346,7 @@ vint16m2_t test_vwmacc_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vwmacc_vx_i16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_tum( @@ -355,7 +355,7 @@ vint16m2_t test_vwmacc_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vwmacc_vv_i16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_tum( @@ -364,7 +364,7 @@ vint16m4_t test_vwmacc_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vwmacc_vx_i16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_tum( @@ -373,7 +373,7 @@ vint16m4_t test_vwmacc_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vwmacc_vv_i16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_tum( @@ -382,7 +382,7 @@ vint16m8_t test_vwmacc_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vwmacc_vx_i16m8_tum(mask, vd, rs1, vs2, vl); + return 
__riscv_vwmacc_vx_i16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_tum( @@ -391,7 +391,7 @@ vint16m8_t test_vwmacc_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vwmacc_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_tum( @@ -400,7 +400,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vwmacc_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_tum( @@ -409,7 +409,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vwmacc_vv_i32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_tum( @@ -418,7 +418,7 @@ vint32m1_t test_vwmacc_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint16mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vwmacc_vx_i32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_tum( @@ -427,7 +427,7 @@ vint32m1_t test_vwmacc_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, 
vint16m1_t vs2, size_t vl) { - return vwmacc_vv_i32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_tum( @@ -436,7 +436,7 @@ vint32m2_t test_vwmacc_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint16m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vwmacc_vx_i32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_tum( @@ -445,7 +445,7 @@ vint32m2_t test_vwmacc_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vwmacc_vv_i32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_tum( @@ -454,7 +454,7 @@ vint32m4_t test_vwmacc_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vwmacc_vx_i32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_tum( @@ -463,7 +463,7 @@ vint32m4_t test_vwmacc_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vwmacc_vv_i32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_tum( @@ -472,7 +472,7 @@ vint32m8_t test_vwmacc_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vwmacc_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vwmacc_vx_i32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tum( @@ -481,7 +481,7 @@ vint32m8_t test_vwmacc_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vwmacc_vv_i64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tum( @@ -490,7 +490,7 @@ vint64m1_t test_vwmacc_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint32mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vwmacc_vx_i64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_tum( @@ -499,7 +499,7 @@ vint64m1_t test_vwmacc_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vwmacc_vv_i64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_tum( @@ -508,7 +508,7 @@ vint64m2_t test_vwmacc_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint32m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vwmacc_vx_i64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_tum( @@ -517,7 +517,7 @@ vint64m2_t test_vwmacc_vx_i64m2_tum(vbool32_t mask, 
vint64m2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vwmacc_vv_i64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_tum( @@ -526,7 +526,7 @@ vint64m4_t test_vwmacc_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint32m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vwmacc_vx_i64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_tum( @@ -535,7 +535,7 @@ vint64m4_t test_vwmacc_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vwmacc_vv_i64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_tum( @@ -544,7 +544,7 @@ vint64m8_t test_vwmacc_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vwmacc_vx_i64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_tumu( @@ -553,7 +553,7 @@ vint64m8_t test_vwmacc_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vwmacc_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vwmacc_vx_i16mf4_tumu( @@ -562,7 +562,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vwmacc_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_tumu( @@ -571,7 +571,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vwmacc_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_tumu( @@ -580,7 +580,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vwmacc_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_tumu( @@ -589,7 +589,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vwmacc_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_tumu( @@ -598,7 +598,7 @@ vint16m1_t test_vwmacc_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint8mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vwmacc_vx_i16m1_tumu(mask, vd, 
rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_tumu( @@ -607,7 +607,7 @@ vint16m1_t test_vwmacc_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vwmacc_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_tumu( @@ -616,7 +616,7 @@ vint16m2_t test_vwmacc_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vwmacc_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_tumu( @@ -625,7 +625,7 @@ vint16m2_t test_vwmacc_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vwmacc_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_tumu( @@ -634,7 +634,7 @@ vint16m4_t test_vwmacc_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vwmacc_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_tumu( @@ -643,7 +643,7 @@ vint16m4_t test_vwmacc_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint8m4_t 
vs1, vint8m4_t vs2, size_t vl) { - return vwmacc_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_tumu( @@ -652,7 +652,7 @@ vint16m8_t test_vwmacc_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vwmacc_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_tumu( @@ -661,7 +661,7 @@ vint16m8_t test_vwmacc_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vwmacc_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_tumu( @@ -670,7 +670,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vwmacc_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_tumu( @@ -679,7 +679,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vwmacc_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_tumu( @@ -688,7 +688,7 @@ vint32m1_t test_vwmacc_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint16mf2_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vwmacc_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_tumu( @@ -697,7 +697,7 @@ vint32m1_t test_vwmacc_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vwmacc_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_tumu( @@ -706,7 +706,7 @@ vint32m2_t test_vwmacc_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vwmacc_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_tumu( @@ -715,7 +715,7 @@ vint32m2_t test_vwmacc_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vwmacc_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_tumu( @@ -724,7 +724,7 @@ vint32m4_t test_vwmacc_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vwmacc_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_tumu( @@ -733,7 +733,7 
@@ vint32m4_t test_vwmacc_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vwmacc_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_tumu( @@ -742,7 +742,7 @@ vint32m8_t test_vwmacc_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vwmacc_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_tumu( @@ -751,7 +751,7 @@ vint32m8_t test_vwmacc_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vwmacc_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_tumu( @@ -760,7 +760,7 @@ vint64m1_t test_vwmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vwmacc_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_tumu( @@ -769,7 +769,7 @@ vint64m1_t test_vwmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vwmacc_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m2_tumu(mask, 
vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_tumu( @@ -778,7 +778,7 @@ vint64m2_t test_vwmacc_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vwmacc_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_tumu( @@ -787,7 +787,7 @@ vint64m2_t test_vwmacc_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vwmacc_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_tumu( @@ -796,7 +796,7 @@ vint64m4_t test_vwmacc_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vwmacc_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_tumu( @@ -805,7 +805,7 @@ vint64m4_t test_vwmacc_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vwmacc_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_tumu( @@ -814,7 +814,7 @@ vint64m8_t test_vwmacc_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return 
vwmacc_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_mu( @@ -823,7 +823,7 @@ vint64m8_t test_vwmacc_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return vwmacc_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_mu( @@ -832,7 +832,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return vwmacc_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_mu( @@ -841,7 +841,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return vwmacc_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_mu( @@ -850,7 +850,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return vwmacc_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_mu( @@ -859,7 +859,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_mu(vbool16_t 
mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return vwmacc_vv_i16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_mu( @@ -868,7 +868,7 @@ vint16m1_t test_vwmacc_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return vwmacc_vx_i16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_mu( @@ -877,7 +877,7 @@ vint16m1_t test_vwmacc_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return vwmacc_vv_i16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_mu( @@ -886,7 +886,7 @@ vint16m2_t test_vwmacc_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return vwmacc_vx_i16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_mu( @@ -895,7 +895,7 @@ vint16m2_t test_vwmacc_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return vwmacc_vv_i16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_mu( @@ -904,7 +904,7 @@ vint16m4_t test_vwmacc_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m4_t test_vwmacc_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return vwmacc_vx_i16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_mu( @@ -913,7 +913,7 @@ vint16m4_t test_vwmacc_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return vwmacc_vv_i16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_mu( @@ -922,7 +922,7 @@ vint16m8_t test_vwmacc_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return vwmacc_vx_i16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_mu( @@ -931,7 +931,7 @@ vint16m8_t test_vwmacc_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return vwmacc_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_mu( @@ -940,7 +940,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return vwmacc_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_mu( @@ -949,7 +949,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t 
vd, int16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return vwmacc_vv_i32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_mu( @@ -958,7 +958,7 @@ vint32m1_t test_vwmacc_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return vwmacc_vx_i32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_mu( @@ -967,7 +967,7 @@ vint32m1_t test_vwmacc_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return vwmacc_vv_i32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_mu( @@ -976,7 +976,7 @@ vint32m2_t test_vwmacc_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return vwmacc_vx_i32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_mu( @@ -985,7 +985,7 @@ vint32m2_t test_vwmacc_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int16_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return vwmacc_vv_i32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_mu( @@ -994,7 +994,7 @@ vint32m4_t 
test_vwmacc_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return vwmacc_vx_i32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_mu( @@ -1003,7 +1003,7 @@ vint32m4_t test_vwmacc_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return vwmacc_vv_i32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_mu( @@ -1012,7 +1012,7 @@ vint32m8_t test_vwmacc_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return vwmacc_vx_i32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_mu( @@ -1021,7 +1021,7 @@ vint32m8_t test_vwmacc_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return vwmacc_vv_i64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_mu( @@ -1030,7 +1030,7 @@ vint64m1_t test_vwmacc_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return vwmacc_vx_i64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vwmacc_vv_i64m2_mu( @@ -1039,7 +1039,7 @@ vint64m1_t test_vwmacc_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return vwmacc_vv_i64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_mu( @@ -1048,7 +1048,7 @@ vint64m2_t test_vwmacc_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return vwmacc_vx_i64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_mu( @@ -1057,7 +1057,7 @@ vint64m2_t test_vwmacc_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return vwmacc_vv_i64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmacc_vv_i64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_mu( @@ -1066,7 +1066,7 @@ vint64m4_t test_vwmacc_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return vwmacc_vx_i64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_mu( @@ -1075,7 +1075,7 @@ vint64m4_t test_vwmacc_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int32_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return vwmacc_vv_i64m8_mu(mask, vd, vs1, vs2, vl); + return 
__riscv_vwmacc_vv_i64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_mu( @@ -1084,6 +1084,6 @@ vint64m8_t test_vwmacc_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return vwmacc_vx_i64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmacc_vx_i64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccsu.c index 6272ebb6b2b3..dcf59e007209 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccsu.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccsu_vv_i16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_tu( @@ -22,7 +22,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccsu_vx_i16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_tu( @@ -31,7 +31,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vuint8mf8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccsu_vv_i16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_tu( @@ -40,7 +40,7 @@ 
vint16mf2_t test_vwmaccsu_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccsu_vx_i16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_tu( @@ -49,7 +49,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vuint8mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i16m1_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_tu( @@ -58,7 +58,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i16m1_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_tu( @@ -67,7 +67,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vwmaccsu_vv_i16m2_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_tu( @@ -76,7 +76,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return vwmaccsu_vx_i16m2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_tu( @@ -85,7 +85,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2_tu(vint16m2_t vd, int8_t 
rs1, vuint8m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vwmaccsu_vv_i16m4_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_tu( @@ -94,7 +94,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return vwmaccsu_vx_i16m4_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_tu( @@ -103,7 +103,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vwmaccsu_vv_i16m8_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_tu( @@ -112,7 +112,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return vwmaccsu_vx_i16m8_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_tu( @@ -121,7 +121,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccsu_vv_i32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_tu( @@ -130,7 +130,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vuint16m // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccsu_vx_i32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_tu( @@ -139,7 +139,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vuint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i32m1_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_tu( @@ -148,7 +148,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i32m1_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_tu( @@ -157,7 +157,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vwmaccsu_vv_i32m2_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_tu( @@ -166,7 +166,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return vwmaccsu_vx_i32m2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_tu( @@ -175,7 +175,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vwmaccsu_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vwmaccsu_vv_i32m4_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_tu( @@ -184,7 +184,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return vwmaccsu_vx_i32m4_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_tu( @@ -193,7 +193,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vwmaccsu_vv_i32m8_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_tu( @@ -202,7 +202,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return vwmaccsu_vx_i32m8_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tu( @@ -211,7 +211,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i64m1_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tu( @@ -220,7 +220,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1_tu(vint64m1_t vd, 
int32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i64m1_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_tu( @@ -229,7 +229,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vwmaccsu_vv_i64m2_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_tu( @@ -238,7 +238,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { - return vwmaccsu_vx_i64m2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_tu( @@ -247,7 +247,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vwmaccsu_vv_i64m4_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_tu( @@ -256,7 +256,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return vwmaccsu_vx_i64m4_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_tu( @@ -265,7 +265,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - 
return vwmaccsu_vv_i64m8_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_tu( @@ -274,7 +274,7 @@ vint64m8_t test_vwmaccsu_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return vwmaccsu_vx_i64m8_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_tum( @@ -283,7 +283,7 @@ vint64m8_t test_vwmaccsu_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccsu_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_tum( @@ -292,7 +292,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccsu_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_tum( @@ -301,7 +301,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int8_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccsu_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_tum( @@ -310,7 +310,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vwmaccsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccsu_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_tum( @@ -319,7 +319,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int8_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_tum( @@ -328,7 +328,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_tum( @@ -337,7 +337,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vwmaccsu_vv_i16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_tum( @@ -346,7 +346,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint8m1_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return vwmaccsu_vx_i16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_tum( @@ -355,7 +355,7 @@ vint16m2_t 
test_vwmaccsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vwmaccsu_vv_i16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_tum( @@ -364,7 +364,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint8m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return vwmaccsu_vx_i16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_tum( @@ -373,7 +373,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vwmaccsu_vv_i16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_tum( @@ -382,7 +382,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint8m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return vwmaccsu_vx_i16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_tum( @@ -391,7 +391,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccsu_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); + return 
__riscv_vwmaccsu_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_tum( @@ -400,7 +400,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccsu_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_tum( @@ -409,7 +409,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_tum( @@ -418,7 +418,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_tum( @@ -427,7 +427,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vwmaccsu_vv_i32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_tum( @@ -436,7 +436,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_tum(vbool16_t mask, 
vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return vwmaccsu_vx_i32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_tum( @@ -445,7 +445,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vwmaccsu_vv_i32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_tum( @@ -454,7 +454,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return vwmaccsu_vx_i32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_tum( @@ -463,7 +463,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vwmaccsu_vv_i32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_tum( @@ -472,7 +472,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return vwmaccsu_vx_i32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tum( @@ -481,7 +481,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t 
vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tum( @@ -490,7 +490,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_tum( @@ -499,7 +499,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vwmaccsu_vv_i64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_tum( @@ -508,7 +508,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { - return vwmaccsu_vx_i64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_tum( @@ -517,7 +517,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vwmaccsu_vv_i64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m4_tum(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_tum( @@ -526,7 +526,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return vwmaccsu_vx_i64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_tum( @@ -535,7 +535,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vwmaccsu_vv_i64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_tum( @@ -544,7 +544,7 @@ vint64m8_t test_vwmaccsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return vwmaccsu_vx_i64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_tumu( @@ -553,7 +553,7 @@ vint64m8_t test_vwmaccsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccsu_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_tumu( @@ -562,7 +562,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { - 
return vwmaccsu_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_tumu( @@ -571,7 +571,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccsu_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_tumu( @@ -580,7 +580,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccsu_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_tumu( @@ -589,7 +589,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_tumu( @@ -598,7 +598,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_tumu( @@ -607,7 +607,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int8_t rs1 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vwmaccsu_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_tumu( @@ -616,7 +616,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint8m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return vwmaccsu_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_tumu( @@ -625,7 +625,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vwmaccsu_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_tumu( @@ -634,7 +634,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint8m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return vwmaccsu_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_tumu( @@ -643,7 +643,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vwmaccsu_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vwmaccsu_vx_i16m8_tumu( @@ -652,7 +652,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint8m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return vwmaccsu_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_tumu( @@ -661,7 +661,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccsu_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_tumu( @@ -670,7 +670,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccsu_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_tumu( @@ -679,7 +679,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_tumu( @@ -688,7 +688,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) 
{ - return vwmaccsu_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_tumu( @@ -697,7 +697,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vwmaccsu_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_tumu( @@ -706,7 +706,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return vwmaccsu_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_tumu( @@ -715,7 +715,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vwmaccsu_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_tumu( @@ -724,7 +724,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return vwmaccsu_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_tumu( @@ -733,7 +733,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int16_t rs1 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vwmaccsu_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_tumu( @@ -742,7 +742,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return vwmaccsu_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_tumu( @@ -751,7 +751,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_tumu( @@ -760,7 +760,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_tumu( @@ -769,7 +769,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vwmaccsu_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vwmaccsu_vx_i64m2_tumu( @@ -778,7 +778,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { - return vwmaccsu_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_tumu( @@ -787,7 +787,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vwmaccsu_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_tumu( @@ -796,7 +796,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return vwmaccsu_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_tumu( @@ -805,7 +805,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vwmaccsu_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_tumu( @@ -814,7 +814,7 @@ vint64m8_t test_vwmaccsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return 
vwmaccsu_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_mu( @@ -823,7 +823,7 @@ vint64m8_t test_vwmaccsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccsu_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_mu( @@ -832,7 +832,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccsu_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_mu( @@ -841,7 +841,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccsu_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_mu( @@ -850,7 +850,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccsu_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_mu( @@ -859,7 +859,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m1_t test_vwmaccsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_mu( @@ -868,7 +868,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint8mf2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_mu( @@ -877,7 +877,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vwmaccsu_vv_i16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_mu( @@ -886,7 +886,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return vwmaccsu_vx_i16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_mu( @@ -895,7 +895,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vwmaccsu_vv_i16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_mu( @@ -904,7 +904,7 @@ vint16m4_t 
test_vwmaccsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return vwmaccsu_vx_i16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_mu( @@ -913,7 +913,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vwmaccsu_vv_i16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_mu( @@ -922,7 +922,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return vwmaccsu_vx_i16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_mu( @@ -931,7 +931,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int8_t rs1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccsu_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_mu( @@ -940,7 +940,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccsu_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32mf2_mu(mask, vd, 
rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_mu( @@ -949,7 +949,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_mu( @@ -958,7 +958,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_mu( @@ -967,7 +967,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vwmaccsu_vv_i32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_mu( @@ -976,7 +976,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return vwmaccsu_vx_i32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_mu( @@ -985,7 +985,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - 
return vwmaccsu_vv_i32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_mu( @@ -994,7 +994,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return vwmaccsu_vx_i32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_mu( @@ -1003,7 +1003,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vwmaccsu_vv_i32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_mu( @@ -1012,7 +1012,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return vwmaccsu_vx_i32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_mu( @@ -1021,7 +1021,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccsu_vv_i64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_mu( @@ -1030,7 +1030,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vwmaccsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccsu_vx_i64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_mu( @@ -1039,7 +1039,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vwmaccsu_vv_i64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_mu( @@ -1048,7 +1048,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { - return vwmaccsu_vx_i64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_mu( @@ -1057,7 +1057,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vwmaccsu_vv_i64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_mu( @@ -1066,7 +1066,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return vwmaccsu_vx_i64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_mu( @@ -1075,7 +1075,7 @@ vint64m4_t 
test_vwmaccsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int32_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vwmaccsu_vv_i64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccsu_vv_i64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_mu( @@ -1084,6 +1084,6 @@ vint64m8_t test_vwmaccsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return vwmaccsu_vx_i64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccsu_vx_i64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccu.c index 16b51be8af82..e8883c1f2af8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccu.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccu_vv_u16mf4_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16mf4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_tu( @@ -22,7 +22,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_tu(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccu_vx_u16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_tu( @@ -31,7 +31,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_tu(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccu_vv_u16mf2_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_tu( @@ -40,7 +40,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_tu(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccu_vx_u16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_tu( @@ -49,7 +49,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_tu(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccu_vv_u16m1_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_tu( @@ -58,7 +58,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_tu(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccu_vx_u16m1_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_tu( @@ -67,7 +67,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1_tu(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vwmaccu_vv_u16m2_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_tu( @@ -76,7 +76,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vwmaccu_vx_u16m2_tu(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vwmaccu_vx_u16m2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_tu( @@ -85,7 +85,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2_tu(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vwmaccu_vv_u16m4_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_tu( @@ -94,7 +94,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_tu(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vwmaccu_vx_u16m4_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_tu( @@ -103,7 +103,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4_tu(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vwmaccu_vv_u16m8_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_tu( @@ -112,7 +112,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_tu(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vwmaccu_vx_u16m8_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_tu( @@ -121,7 +121,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8_tu(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs1, 
vuint16mf4_t vs2, size_t vl) { - return vwmaccu_vv_u32mf2_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32mf2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_tu( @@ -130,7 +130,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_tu(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccu_vx_u32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_tu( @@ -139,7 +139,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_tu(vuint32mf2_t vd, uint16_t rs1, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccu_vv_u32m1_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_tu( @@ -148,7 +148,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_tu(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccu_vx_u32m1_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_tu( @@ -157,7 +157,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1_tu(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vwmaccu_vv_u32m2_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_tu( @@ -166,7 +166,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_tu(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return 
vwmaccu_vx_u32m2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_tu( @@ -175,7 +175,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2_tu(vuint32m2_t vd, uint16_t rs1, vuint16m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vwmaccu_vv_u32m4_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_tu( @@ -184,7 +184,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_tu(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vwmaccu_vx_u32m4_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_tu( @@ -193,7 +193,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4_tu(vuint32m4_t vd, uint16_t rs1, vuint16m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vwmaccu_vv_u32m8_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_tu( @@ -202,7 +202,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_tu(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vwmaccu_vx_u32m8_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tu( @@ -211,7 +211,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8_tu(vuint32m8_t vd, uint16_t rs1, vuint16m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccu_vv_u64m1_tu(vd, vs1, vs2, vl); + return 
__riscv_vwmaccu_vv_u64m1_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tu( @@ -220,7 +220,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1_tu(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccu_vx_u64m1_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_tu( @@ -229,7 +229,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1_tu(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vwmaccu_vv_u64m2_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m2_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_tu( @@ -238,7 +238,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_tu(vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vwmaccu_vx_u64m2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_tu( @@ -247,7 +247,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2_tu(vuint64m2_t vd, uint32_t rs1, vuint32m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vwmaccu_vv_u64m4_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m4_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_tu( @@ -256,7 +256,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_tu(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vwmaccu_vx_u64m4_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m4_tu(vd, rs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_tu( @@ -265,7 +265,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4_tu(vuint64m4_t vd, uint32_t rs1, vuint32m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vwmaccu_vv_u64m8_tu(vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m8_tu(vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_tu( @@ -274,7 +274,7 @@ vuint64m8_t test_vwmaccu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_tu(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vwmaccu_vx_u64m8_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_tum( @@ -283,7 +283,7 @@ vuint64m8_t test_vwmaccu_vx_u64m8_tu(vuint64m8_t vd, uint32_t rs1, vuint32m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccu_vv_u16mf4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16mf4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_tum( @@ -292,7 +292,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccu_vx_u16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_tum( @@ -301,7 +301,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccu_vv_u16mf2_tum(mask, vd, vs1, vs2, vl); + 
return __riscv_vwmaccu_vv_u16mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_tum( @@ -310,7 +310,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccu_vx_u16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_tum( @@ -319,7 +319,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccu_vv_u16m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_tum( @@ -328,7 +328,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccu_vx_u16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_tum( @@ -337,7 +337,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vwmaccu_vv_u16m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_tum( @@ -346,7 +346,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_tum(vbool8_t mask, 
vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vwmaccu_vx_u16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_tum( @@ -355,7 +355,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vwmaccu_vv_u16m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_tum( @@ -364,7 +364,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vwmaccu_vx_u16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_tum( @@ -373,7 +373,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vwmaccu_vv_u16m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_tum( @@ -382,7 +382,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vwmaccu_vx_u16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_tum( @@ -391,7 +391,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint8_t 
rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccu_vv_u32mf2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32mf2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_tum( @@ -400,7 +400,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccu_vx_u32mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_tum( @@ -409,7 +409,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccu_vv_u32m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_tum( @@ -418,7 +418,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccu_vx_u32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_tum( @@ -427,7 +427,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vwmaccu_vv_u32m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m2_tum(mask, vd, vs1, vs2, vl); } // 
CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_tum( @@ -436,7 +436,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vwmaccu_vx_u32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_tum( @@ -445,7 +445,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vwmaccu_vv_u32m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_tum( @@ -454,7 +454,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vwmaccu_vx_u32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_tum( @@ -463,7 +463,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vwmaccu_vv_u32m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_tum( @@ -472,7 +472,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return 
vwmaccu_vx_u32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tum( @@ -481,7 +481,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccu_vv_u64m1_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m1_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tum( @@ -490,7 +490,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccu_vx_u64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_tum( @@ -499,7 +499,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vwmaccu_vv_u64m2_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m2_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_tum( @@ -508,7 +508,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vwmaccu_vx_u64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_tum( @@ -517,7 +517,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vwmaccu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vwmaccu_vv_u64m4_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m4_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_tum( @@ -526,7 +526,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vwmaccu_vx_u64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_tum( @@ -535,7 +535,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vwmaccu_vv_u64m8_tum(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m8_tum(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_tum( @@ -544,7 +544,7 @@ vuint64m8_t test_vwmaccu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vwmaccu_vx_u64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_tumu( @@ -553,7 +553,7 @@ vuint64m8_t test_vwmaccu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccu_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_tumu( @@ -562,7 +562,7 @@ 
vuint16mf4_t test_vwmaccu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccu_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_tumu( @@ -571,7 +571,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccu_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_tumu( @@ -580,7 +580,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccu_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_tumu( @@ -589,7 +589,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccu_vv_u16m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_tumu( @@ -598,7 +598,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccu_vx_u16m1_tumu(mask, vd, rs1, vs2, 
vl); + return __riscv_vwmaccu_vx_u16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_tumu( @@ -607,7 +607,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint8_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vwmaccu_vv_u16m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_tumu( @@ -616,7 +616,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vwmaccu_vx_u16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_tumu( @@ -625,7 +625,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return vwmaccu_vv_u16m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_tumu( @@ -634,7 +634,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vwmaccu_vx_u16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_tumu( @@ -643,7 +643,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_tumu(vbool2_t 
mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vwmaccu_vv_u16m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_tumu( @@ -652,7 +652,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vwmaccu_vx_u16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_tumu( @@ -661,7 +661,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccu_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_tumu( @@ -670,7 +670,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccu_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_tumu( @@ -679,7 +679,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccu_vv_u32m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_tumu( @@ -688,7 +688,7 @@ vuint32m1_t 
test_vwmaccu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccu_vx_u32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_tumu( @@ -697,7 +697,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vwmaccu_vv_u32m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_tumu( @@ -706,7 +706,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vwmaccu_vx_u32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_tumu( @@ -715,7 +715,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vwmaccu_vv_u32m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_tumu( @@ -724,7 +724,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vwmaccu_vx_u32m4_tumu(mask, vd, rs1, vs2, vl); + return 
__riscv_vwmaccu_vx_u32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_tumu( @@ -733,7 +733,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vwmaccu_vv_u32m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_tumu( @@ -742,7 +742,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vwmaccu_vx_u32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_tumu( @@ -751,7 +751,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccu_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_tumu( @@ -760,7 +760,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccu_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_tumu( @@ -769,7 +769,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_tumu(vbool32_t 
mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vwmaccu_vv_u64m2_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m2_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_tumu( @@ -778,7 +778,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vwmaccu_vx_u64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_tumu( @@ -787,7 +787,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vwmaccu_vv_u64m4_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m4_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_tumu( @@ -796,7 +796,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return vwmaccu_vx_u64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_tumu( @@ -805,7 +805,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vwmaccu_vv_u64m8_tumu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m8_tumu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_tumu( @@ -814,7 +814,7 @@ vuint64m8_t 
test_vwmaccu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vwmaccu_vx_u64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_mu( @@ -823,7 +823,7 @@ vuint64m8_t test_vwmaccu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccu_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_mu( @@ -832,7 +832,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return vwmaccu_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_mu( @@ -841,7 +841,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccu_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_mu( @@ -850,7 +850,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return vwmaccu_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); + return 
__riscv_vwmaccu_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_mu( @@ -859,7 +859,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccu_vv_u16m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_mu( @@ -868,7 +868,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return vwmaccu_vx_u16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_mu( @@ -877,7 +877,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return vwmaccu_vv_u16m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_mu( @@ -886,7 +886,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint8m1_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return vwmaccu_vx_u16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_mu( @@ -895,7 +895,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t 
vs2, size_t vl) { - return vwmaccu_vv_u16m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_mu( @@ -904,7 +904,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint8m2_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return vwmaccu_vx_u16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_mu( @@ -913,7 +913,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return vwmaccu_vv_u16m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u16m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_mu( @@ -922,7 +922,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint8m4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return vwmaccu_vx_u16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_mu( @@ -931,7 +931,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccu_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_mu( @@ -940,7 +940,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32mf2_t test_vwmaccu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return vwmaccu_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_mu( @@ -949,7 +949,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccu_vv_u32m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m1_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_mu( @@ -958,7 +958,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return vwmaccu_vx_u32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_mu( @@ -967,7 +967,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return vwmaccu_vv_u32m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_mu( @@ -976,7 +976,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return vwmaccu_vx_u32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_mu( @@ -985,7 +985,7 @@ vuint32m2_t 
test_vwmaccu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return vwmaccu_vv_u32m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_mu( @@ -994,7 +994,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return vwmaccu_vx_u32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_mu( @@ -1003,7 +1003,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return vwmaccu_vv_u32m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u32m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_mu( @@ -1012,7 +1012,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return vwmaccu_vx_u32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_mu( @@ -1021,7 +1021,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccu_vv_u64m1_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m1_mu(mask, vd, vs1, 
vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_mu( @@ -1030,7 +1030,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return vwmaccu_vx_u64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_mu( @@ -1039,7 +1039,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return vwmaccu_vv_u64m2_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m2_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_mu( @@ -1048,7 +1048,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return vwmaccu_vx_u64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_mu( @@ -1057,7 +1057,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return vwmaccu_vv_u64m4_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m4_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_mu( @@ -1066,7 +1066,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return 
vwmaccu_vx_u64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_mu( @@ -1075,7 +1075,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return vwmaccu_vv_u64m8_mu(mask, vd, vs1, vs2, vl); + return __riscv_vwmaccu_vv_u64m8_mu(mask, vd, vs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_mu( @@ -1084,6 +1084,6 @@ vuint64m8_t test_vwmaccu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return vwmaccu_vx_u64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccu_vx_u64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccus.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccus.c index 2ff8d739ee58..14c6123a6afc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccus.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmaccus.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4_tu(vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { - return vwmaccus_vx_i16mf4_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16mf4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_tu( @@ -22,7 +22,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_tu(vint16mf4_t vd, uint8_t rs1, vint8mf8_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_tu(vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { - return vwmaccus_vx_i16mf2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16mf2_tu(vd, 
rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_tu( @@ -31,7 +31,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_tu(vint16mf2_t vd, uint8_t rs1, vint8mf4_t v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_tu(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { - return vwmaccus_vx_i16m1_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_tu( @@ -40,7 +40,7 @@ vint16m1_t test_vwmaccus_vx_i16m1_tu(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_tu(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { - return vwmaccus_vx_i16m2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_tu( @@ -49,7 +49,7 @@ vint16m2_t test_vwmaccus_vx_i16m2_tu(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_tu(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { - return vwmaccus_vx_i16m4_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_tu( @@ -58,7 +58,7 @@ vint16m4_t test_vwmaccus_vx_i16m4_tu(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_tu(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { - return vwmaccus_vx_i16m8_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_tu( @@ -67,7 +67,7 @@ vint16m8_t test_vwmaccus_vx_i16m8_tu(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_tu(vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { - return vwmaccus_vx_i32mf2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32mf2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_tu( 
@@ -76,7 +76,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_tu(vint32mf2_t vd, uint16_t rs1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_tu(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { - return vwmaccus_vx_i32m1_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_tu( @@ -85,7 +85,7 @@ vint32m1_t test_vwmaccus_vx_i32m1_tu(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_tu(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { - return vwmaccus_vx_i32m2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_tu( @@ -94,7 +94,7 @@ vint32m2_t test_vwmaccus_vx_i32m2_tu(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_tu(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { - return vwmaccus_vx_i32m4_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_tu( @@ -103,7 +103,7 @@ vint32m4_t test_vwmaccus_vx_i32m4_tu(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_tu(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { - return vwmaccus_vx_i32m8_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tu( @@ -112,7 +112,7 @@ vint32m8_t test_vwmaccus_vx_i32m8_tu(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_tu(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { - return vwmaccus_vx_i64m1_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m1_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_tu( @@ -121,7 +121,7 @@ vint64m1_t 
test_vwmaccus_vx_i64m1_tu(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_tu(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { - return vwmaccus_vx_i64m2_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m2_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_tu( @@ -130,7 +130,7 @@ vint64m2_t test_vwmaccus_vx_i64m2_tu(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_tu(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { - return vwmaccus_vx_i64m4_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m4_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_tu( @@ -139,7 +139,7 @@ vint64m4_t test_vwmaccus_vx_i64m4_tu(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_tu(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { - return vwmaccus_vx_i64m8_tu(vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m8_tu(vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_tum( @@ -148,7 +148,7 @@ vint64m8_t test_vwmaccus_vx_i64m8_tu(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { - return vwmaccus_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_tum( @@ -157,7 +157,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { - return vwmaccus_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_tum( @@ -166,7 
+166,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { - return vwmaccus_vx_i16m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_tum( @@ -175,7 +175,7 @@ vint16m1_t test_vwmaccus_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { - return vwmaccus_vx_i16m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_tum( @@ -184,7 +184,7 @@ vint16m2_t test_vwmaccus_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { - return vwmaccus_vx_i16m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_tum( @@ -193,7 +193,7 @@ vint16m4_t test_vwmaccus_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { - return vwmaccus_vx_i16m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_tum( @@ -202,7 +202,7 @@ vint16m8_t test_vwmaccus_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { - return vwmaccus_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); + return 
__riscv_vwmaccus_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_tum( @@ -211,7 +211,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { - return vwmaccus_vx_i32m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_tum( @@ -220,7 +220,7 @@ vint32m1_t test_vwmaccus_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { - return vwmaccus_vx_i32m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_tum( @@ -229,7 +229,7 @@ vint32m2_t test_vwmaccus_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { - return vwmaccus_vx_i32m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_tum( @@ -238,7 +238,7 @@ vint32m4_t test_vwmaccus_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { - return vwmaccus_vx_i32m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tum( @@ -247,7 +247,7 @@ vint32m8_t test_vwmaccus_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, 
uint32_t rs1, vint32mf2_t vs2, size_t vl) { - return vwmaccus_vx_i64m1_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m1_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_tum( @@ -256,7 +256,7 @@ vint64m1_t test_vwmaccus_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { - return vwmaccus_vx_i64m2_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m2_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_tum( @@ -265,7 +265,7 @@ vint64m2_t test_vwmaccus_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { - return vwmaccus_vx_i64m4_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m4_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_tum( @@ -274,7 +274,7 @@ vint64m4_t test_vwmaccus_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { - return vwmaccus_vx_i64m8_tum(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m8_tum(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_tumu( @@ -283,7 +283,7 @@ vint64m8_t test_vwmaccus_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { - return vwmaccus_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_tumu( @@ -292,7 +292,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t 
vd, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { - return vwmaccus_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_tumu( @@ -301,7 +301,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { - return vwmaccus_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_tumu( @@ -310,7 +310,7 @@ vint16m1_t test_vwmaccus_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, uint8_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { - return vwmaccus_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_tumu( @@ -319,7 +319,7 @@ vint16m2_t test_vwmaccus_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { - return vwmaccus_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_tumu( @@ -328,7 +328,7 @@ vint16m4_t test_vwmaccus_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { - return vwmaccus_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: 
@test_vwmaccus_vx_i32mf2_tumu( @@ -337,7 +337,7 @@ vint16m8_t test_vwmaccus_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, uint8_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { - return vwmaccus_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_tumu( @@ -346,7 +346,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, uint16_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { - return vwmaccus_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_tumu( @@ -355,7 +355,7 @@ vint32m1_t test_vwmaccus_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { - return vwmaccus_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_tumu( @@ -364,7 +364,7 @@ vint32m2_t test_vwmaccus_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, uint16_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { - return vwmaccus_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_tumu( @@ -373,7 +373,7 @@ vint32m4_t test_vwmaccus_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { - return 
vwmaccus_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_tumu( @@ -382,7 +382,7 @@ vint32m8_t test_vwmaccus_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, uint16_t rs // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { - return vwmaccus_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_tumu( @@ -391,7 +391,7 @@ vint64m1_t test_vwmaccus_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { - return vwmaccus_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_tumu( @@ -400,7 +400,7 @@ vint64m2_t test_vwmaccus_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { - return vwmaccus_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_tumu( @@ -409,7 +409,7 @@ vint64m4_t test_vwmaccus_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, uint32_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { - return vwmaccus_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_mu( @@ -418,7 +418,7 @@ vint64m8_t test_vwmaccus_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, uint32_t rs // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint16mf4_t test_vwmaccus_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { - return vwmaccus_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_mu( @@ -427,7 +427,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, uint8_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { - return vwmaccus_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_mu( @@ -436,7 +436,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, uint8_t r // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { - return vwmaccus_vx_i16m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_mu( @@ -445,7 +445,7 @@ vint16m1_t test_vwmaccus_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { - return vwmaccus_vx_i16m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_mu( @@ -454,7 +454,7 @@ vint16m2_t test_vwmaccus_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { - return vwmaccus_vx_i16m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_mu( @@ -463,7 +463,7 @@ vint16m4_t 
test_vwmaccus_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { - return vwmaccus_vx_i16m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i16m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_mu( @@ -472,7 +472,7 @@ vint16m8_t test_vwmaccus_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, uint8_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { - return vwmaccus_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_mu( @@ -481,7 +481,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { - return vwmaccus_vx_i32m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_mu( @@ -490,7 +490,7 @@ vint32m1_t test_vwmaccus_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { - return vwmaccus_vx_i32m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_mu( @@ -499,7 +499,7 @@ vint32m2_t test_vwmaccus_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, uint16_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { - return vwmaccus_vx_i32m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m4_mu(mask, vd, rs1, vs2, 
vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_mu( @@ -508,7 +508,7 @@ vint32m4_t test_vwmaccus_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { - return vwmaccus_vx_i32m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i32m8_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_mu( @@ -517,7 +517,7 @@ vint32m8_t test_vwmaccus_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, uint16_t rs1, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { - return vwmaccus_vx_i64m1_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m1_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_mu( @@ -526,7 +526,7 @@ vint64m1_t test_vwmaccus_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { - return vwmaccus_vx_i64m2_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m2_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_mu( @@ -535,7 +535,7 @@ vint64m2_t test_vwmaccus_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { - return vwmaccus_vx_i64m4_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m4_mu(mask, vd, rs1, vs2, vl); } // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_mu( @@ -544,6 +544,6 @@ vint64m4_t test_vwmaccus_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, uint32_t rs1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { - return 
vwmaccus_vx_i64m8_mu(mask, vd, rs1, vs2, vl); + return __riscv_vwmaccus_vx_i64m8_mu(mask, vd, rs1, vs2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmul.c index 343baa8c410b..6c7c325eb137 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmul.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwmul_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_tu( @@ -21,7 +21,7 @@ vint16mf4_t test_vwmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_tu( @@ -30,7 +30,7 @@ vint16mf4_t test_vwmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwmul_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_tu( @@ -39,7 +39,7 @@ vint16mf2_t test_vwmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_tu( @@ -48,7 +48,7 @@ vint16mf2_t test_vwmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwmul_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_tu( @@ -57,7 +57,7 @@ vint16m1_t test_vwmul_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_tu( @@ -66,7 +66,7 @@ vint16m1_t test_vwmul_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwmul_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_tu( @@ -75,7 +75,7 @@ vint16m2_t test_vwmul_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_tu( @@ -84,7 +84,7 @@ vint16m2_t test_vwmul_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwmul_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwmul_vx_i16m4_tu( @@ -93,7 +93,7 @@ vint16m4_t test_vwmul_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_tu( @@ -102,7 +102,7 @@ vint16m4_t test_vwmul_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwmul_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_tu( @@ -111,7 +111,7 @@ vint16m8_t test_vwmul_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_tu( @@ -120,7 +120,7 @@ vint16m8_t test_vwmul_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwmul_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_tu( @@ -129,7 +129,7 @@ vint32mf2_t test_vwmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwmul_vv_i32m1_tu( @@ -138,7 +138,7 @@ vint32mf2_t test_vwmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwmul_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_tu( @@ -147,7 +147,7 @@ vint32m1_t test_vwmul_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_tu( @@ -156,7 +156,7 @@ vint32m1_t test_vwmul_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwmul_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_tu( @@ -165,7 +165,7 @@ vint32m2_t test_vwmul_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_tu( @@ -174,7 +174,7 @@ vint32m2_t test_vwmul_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwmul_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwmul_vx_i32m4_tu( @@ -183,7 +183,7 @@ vint32m4_t test_vwmul_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_tu( @@ -192,7 +192,7 @@ vint32m4_t test_vwmul_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwmul_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_tu( @@ -201,7 +201,7 @@ vint32m8_t test_vwmul_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tu( @@ -210,7 +210,7 @@ vint32m8_t test_vwmul_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwmul_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tu( @@ -219,7 +219,7 @@ vint64m1_t test_vwmul_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwmul_vv_i64m2_tu( @@ -228,7 +228,7 @@ vint64m1_t test_vwmul_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwmul_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_tu( @@ -237,7 +237,7 @@ vint64m2_t test_vwmul_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_tu( @@ -246,7 +246,7 @@ vint64m2_t test_vwmul_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwmul_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_tu( @@ -255,7 +255,7 @@ vint64m4_t test_vwmul_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_tu( @@ -264,7 +264,7 @@ vint64m4_t test_vwmul_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwmul_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwmul_vx_i64m8_tu( @@ -273,7 +273,7 @@ vint64m8_t test_vwmul_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_tum( @@ -282,7 +282,7 @@ vint64m8_t test_vwmul_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwmul_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_tum( @@ -291,7 +291,7 @@ vint16mf4_t test_vwmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_tum( @@ -300,7 +300,7 @@ vint16mf4_t test_vwmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwmul_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_tum( @@ -309,7 +309,7 @@ vint16mf2_t test_vwmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return 
vwmul_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_tum( @@ -318,7 +318,7 @@ vint16mf2_t test_vwmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwmul_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_tum( @@ -327,7 +327,7 @@ vint16m1_t test_vwmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_tum( @@ -336,7 +336,7 @@ vint16m1_t test_vwmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwmul_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_tum( @@ -345,7 +345,7 @@ vint16m2_t test_vwmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_tum( @@ -354,7 +354,7 @@ vint16m2_t test_vwmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwmul_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_tum( @@ -363,7 +363,7 @@ vint16m4_t test_vwmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_tum( @@ -372,7 +372,7 @@ vint16m4_t test_vwmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwmul_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_tum( @@ -381,7 +381,7 @@ vint16m8_t test_vwmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_tum( @@ -390,7 +390,7 @@ vint16m8_t test_vwmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwmul_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_tum( @@ -399,7 +399,7 @@ vint32mf2_t test_vwmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_tum( @@ -408,7 +408,7 @@ vint32mf2_t test_vwmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwmul_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_tum( @@ -417,7 +417,7 @@ vint32m1_t test_vwmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_tum( @@ -426,7 +426,7 @@ vint32m1_t test_vwmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwmul_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_tum( @@ -435,7 +435,7 @@ vint32m2_t test_vwmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t 
op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_tum( @@ -444,7 +444,7 @@ vint32m2_t test_vwmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwmul_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_tum( @@ -453,7 +453,7 @@ vint32m4_t test_vwmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_tum( @@ -462,7 +462,7 @@ vint32m4_t test_vwmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwmul_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_tum( @@ -471,7 +471,7 @@ vint32m8_t test_vwmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tum( @@ -480,7 +480,7 @@ vint32m8_t test_vwmul_vx_i32m8_tum(vbool4_t mask, 
vint32m8_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwmul_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tum( @@ -489,7 +489,7 @@ vint64m1_t test_vwmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_tum( @@ -498,7 +498,7 @@ vint64m1_t test_vwmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwmul_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_tum( @@ -507,7 +507,7 @@ vint64m2_t test_vwmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_tum( @@ -516,7 +516,7 @@ vint64m2_t test_vwmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwmul_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwmul_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_tum( @@ -525,7 +525,7 @@ vint64m4_t test_vwmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_tum( @@ -534,7 +534,7 @@ vint64m4_t test_vwmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwmul_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_tum( @@ -543,7 +543,7 @@ vint64m8_t test_vwmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_tumu( @@ -552,7 +552,7 @@ vint64m8_t test_vwmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwmul_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_tumu( @@ -561,7 +561,7 @@ vint16mf4_t test_vwmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vwmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_tumu( @@ -570,7 +570,7 @@ vint16mf4_t test_vwmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwmul_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_tumu( @@ -579,7 +579,7 @@ vint16mf2_t test_vwmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_tumu( @@ -588,7 +588,7 @@ vint16mf2_t test_vwmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwmul_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_tumu( @@ -597,7 +597,7 @@ vint16m1_t test_vwmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_tumu( @@ -606,7 +606,7 @@ vint16m1_t test_vwmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwmul_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_tumu( @@ -615,7 +615,7 @@ vint16m2_t test_vwmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_tumu( @@ -624,7 +624,7 @@ vint16m2_t test_vwmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwmul_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_tumu( @@ -633,7 +633,7 @@ vint16m4_t test_vwmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_tumu( @@ -642,7 +642,7 @@ vint16m4_t test_vwmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, 
vint8m4_t op2, size_t vl) { - return vwmul_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_tumu( @@ -651,7 +651,7 @@ vint16m8_t test_vwmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_tumu( @@ -660,7 +660,7 @@ vint16m8_t test_vwmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwmul_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_tumu( @@ -669,7 +669,7 @@ vint32mf2_t test_vwmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_tumu( @@ -678,7 +678,7 @@ vint32mf2_t test_vwmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_tumu( @@ -687,7 +687,7 @@ vint32m1_t 
test_vwmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_tumu( @@ -696,7 +696,7 @@ vint32m1_t test_vwmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwmul_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_tumu( @@ -705,7 +705,7 @@ vint32m2_t test_vwmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_tumu( @@ -714,7 +714,7 @@ vint32m2_t test_vwmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwmul_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_tumu( @@ -723,7 +723,7 @@ vint32m4_t test_vwmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m4_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_tumu( @@ -732,7 +732,7 @@ vint32m4_t test_vwmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwmul_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_tumu( @@ -741,7 +741,7 @@ vint32m8_t test_vwmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_tumu( @@ -750,7 +750,7 @@ vint32m8_t test_vwmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwmul_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_tumu( @@ -759,7 +759,7 @@ vint64m1_t test_vwmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_tumu( @@ -768,7 +768,7 @@ vint64m1_t test_vwmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwmul_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_tumu( @@ -777,7 +777,7 @@ vint64m2_t test_vwmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_tumu( @@ -786,7 +786,7 @@ vint64m2_t test_vwmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwmul_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_tumu( @@ -795,7 +795,7 @@ vint64m4_t test_vwmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_tumu( @@ -804,7 +804,7 @@ vint64m4_t test_vwmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwmul_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m8_tumu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_tumu( @@ -813,7 +813,7 @@ vint64m8_t test_vwmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_mu( @@ -822,7 +822,7 @@ vint64m8_t test_vwmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwmul_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_mu( @@ -831,7 +831,7 @@ vint16mf4_t test_vwmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_mu( @@ -840,7 +840,7 @@ vint16mf4_t test_vwmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwmul_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_mu( @@ -849,7 +849,7 @@ vint16mf2_t test_vwmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2_mu(vbool32_t mask, 
vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_mu( @@ -858,7 +858,7 @@ vint16mf2_t test_vwmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwmul_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_mu( @@ -867,7 +867,7 @@ vint16m1_t test_vwmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_mu( @@ -876,7 +876,7 @@ vint16m1_t test_vwmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwmul_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_mu( @@ -885,7 +885,7 @@ vint16m2_t test_vwmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_mu( @@ -894,7 +894,7 @@ vint16m2_t 
test_vwmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwmul_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_mu( @@ -903,7 +903,7 @@ vint16m4_t test_vwmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_mu( @@ -912,7 +912,7 @@ vint16m4_t test_vwmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwmul_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_mu( @@ -921,7 +921,7 @@ vint16m8_t test_vwmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwmul_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_mu( @@ -930,7 +930,7 @@ vint16m8_t test_vwmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwmul_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwmul_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_mu( @@ -939,7 +939,7 @@ vint32mf2_t test_vwmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_mu( @@ -948,7 +948,7 @@ vint32mf2_t test_vwmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwmul_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_mu( @@ -957,7 +957,7 @@ vint32m1_t test_vwmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_mu( @@ -966,7 +966,7 @@ vint32m1_t test_vwmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwmul_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_mu( @@ -975,7 +975,7 @@ vint32m2_t test_vwmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vwmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_mu( @@ -984,7 +984,7 @@ vint32m2_t test_vwmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwmul_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_mu( @@ -993,7 +993,7 @@ vint32m4_t test_vwmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_mu( @@ -1002,7 +1002,7 @@ vint32m4_t test_vwmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwmul_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_mu( @@ -1011,7 +1011,7 @@ vint32m8_t test_vwmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwmul_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_mu( @@ -1020,7 
+1020,7 @@ vint32m8_t test_vwmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwmul_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_mu( @@ -1029,7 +1029,7 @@ vint64m1_t test_vwmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_mu( @@ -1038,7 +1038,7 @@ vint64m1_t test_vwmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwmul_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_mu( @@ -1047,7 +1047,7 @@ vint64m2_t test_vwmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_mu( @@ -1056,7 +1056,7 @@ vint64m2_t test_vwmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwmul_vv_i64m4_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_mu( @@ -1065,7 +1065,7 @@ vint64m4_t test_vwmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_mu( @@ -1074,7 +1074,7 @@ vint64m4_t test_vwmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwmul_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_mu( @@ -1083,6 +1083,6 @@ vint64m8_t test_vwmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwmul_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmul_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmulsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmulsu.c index cfaa9203ca15..a3996835ad92 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmulsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmulsu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulsu_vv_i16mf4_tu(maskedoff, op1, op2, 
vl); + return __riscv_vwmulsu_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_tu( @@ -21,7 +21,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_tu( @@ -30,7 +30,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulsu_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_tu( @@ -39,7 +39,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_tu( @@ -48,7 +48,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulsu_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_tu( @@ -57,7 +57,7 @@ vint16m1_t test_vwmulsu_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - 
return vwmulsu_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_tu( @@ -66,7 +66,7 @@ vint16m1_t test_vwmulsu_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulsu_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_tu( @@ -75,7 +75,7 @@ vint16m2_t test_vwmulsu_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_tu( @@ -84,7 +84,7 @@ vint16m2_t test_vwmulsu_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulsu_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_tu( @@ -93,7 +93,7 @@ vint16m4_t test_vwmulsu_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_tu( @@ -102,7 +102,7 @@ vint16m4_t test_vwmulsu_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, 
vuint8m4_t op2, size_t vl) { - return vwmulsu_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_tu( @@ -111,7 +111,7 @@ vint16m8_t test_vwmulsu_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_tu( @@ -120,7 +120,7 @@ vint16m8_t test_vwmulsu_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, uint8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulsu_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_tu( @@ -129,7 +129,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_tu( @@ -138,7 +138,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulsu_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_tu( @@ -147,7 +147,7 @@ vint32m1_t test_vwmulsu_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vwmulsu_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_tu( @@ -156,7 +156,7 @@ vint32m1_t test_vwmulsu_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, uint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulsu_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_tu( @@ -165,7 +165,7 @@ vint32m2_t test_vwmulsu_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_tu( @@ -174,7 +174,7 @@ vint32m2_t test_vwmulsu_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulsu_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_tu( @@ -183,7 +183,7 @@ vint32m4_t test_vwmulsu_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_tu( @@ -192,7 +192,7 @@ vint32m4_t test_vwmulsu_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, uint16 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulsu_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_tu( @@ -201,7 +201,7 @@ vint32m8_t test_vwmulsu_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tu( @@ -210,7 +210,7 @@ vint32m8_t test_vwmulsu_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulsu_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tu( @@ -219,7 +219,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_tu( @@ -228,7 +228,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, uint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulsu_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_tu( @@ -237,7 +237,7 @@ vint64m2_t 
test_vwmulsu_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_tu( @@ -246,7 +246,7 @@ vint64m2_t test_vwmulsu_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulsu_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_tu( @@ -255,7 +255,7 @@ vint64m4_t test_vwmulsu_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_tu( @@ -264,7 +264,7 @@ vint64m4_t test_vwmulsu_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulsu_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_tu( @@ -273,7 +273,7 @@ vint64m8_t test_vwmulsu_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwmulsu_vv_i16mf4_tum( @@ -282,7 +282,7 @@ vint64m8_t test_vwmulsu_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulsu_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_tum( @@ -291,7 +291,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_tum( @@ -300,7 +300,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulsu_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_tum( @@ -309,7 +309,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_tum( @@ -318,7 +318,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1_tum(vbool16_t mask, 
vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulsu_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_tum( @@ -327,7 +327,7 @@ vint16m1_t test_vwmulsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_tum( @@ -336,7 +336,7 @@ vint16m1_t test_vwmulsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulsu_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_tum( @@ -345,7 +345,7 @@ vint16m2_t test_vwmulsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_tum( @@ -354,7 +354,7 @@ vint16m2_t test_vwmulsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulsu_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_tum( 
@@ -363,7 +363,7 @@ vint16m4_t test_vwmulsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_tum( @@ -372,7 +372,7 @@ vint16m4_t test_vwmulsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulsu_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_tum( @@ -381,7 +381,7 @@ vint16m8_t test_vwmulsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_tum( @@ -390,7 +390,7 @@ vint16m8_t test_vwmulsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulsu_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_tum( @@ -399,7 +399,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t 
op2, size_t vl) { - return vwmulsu_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_tum( @@ -408,7 +408,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vi // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulsu_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_tum( @@ -417,7 +417,7 @@ vint32m1_t test_vwmulsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_tum( @@ -426,7 +426,7 @@ vint32m1_t test_vwmulsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulsu_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_tum( @@ -435,7 +435,7 @@ vint32m2_t test_vwmulsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_tum( @@ -444,7 +444,7 @@ vint32m2_t 
test_vwmulsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulsu_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_tum( @@ -453,7 +453,7 @@ vint32m4_t test_vwmulsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_tum( @@ -462,7 +462,7 @@ vint32m4_t test_vwmulsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulsu_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_tum( @@ -471,7 +471,7 @@ vint32m8_t test_vwmulsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tum( @@ -480,7 +480,7 @@ vint32m8_t test_vwmulsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return 
vwmulsu_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tum( @@ -489,7 +489,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_tum( @@ -498,7 +498,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulsu_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_tum( @@ -507,7 +507,7 @@ vint64m2_t test_vwmulsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_tum( @@ -516,7 +516,7 @@ vint64m2_t test_vwmulsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulsu_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_tum( @@ -525,7 +525,7 @@ vint64m4_t test_vwmulsu_vv_i64m4_tum(vbool16_t 
mask, vint64m4_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_tum( @@ -534,7 +534,7 @@ vint64m4_t test_vwmulsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulsu_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_tum( @@ -543,7 +543,7 @@ vint64m8_t test_vwmulsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_tumu( @@ -552,7 +552,7 @@ vint64m8_t test_vwmulsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulsu_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_tumu( @@ -561,7 +561,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf4_tumu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_tumu( @@ -570,7 +570,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulsu_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_tumu( @@ -579,7 +579,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_tumu( @@ -588,7 +588,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulsu_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_tumu( @@ -597,7 +597,7 @@ vint16m1_t test_vwmulsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_tumu( @@ -606,7 +606,7 @@ vint16m1_t 
test_vwmulsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulsu_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_tumu( @@ -615,7 +615,7 @@ vint16m2_t test_vwmulsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_tumu( @@ -624,7 +624,7 @@ vint16m2_t test_vwmulsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulsu_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_tumu( @@ -633,7 +633,7 @@ vint16m4_t test_vwmulsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_tumu( @@ -642,7 +642,7 @@ vint16m4_t test_vwmulsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return 
vwmulsu_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_tumu( @@ -651,7 +651,7 @@ vint16m8_t test_vwmulsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_tumu( @@ -660,7 +660,7 @@ vint16m8_t test_vwmulsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulsu_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_tumu( @@ -669,7 +669,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_tumu( @@ -678,7 +678,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulsu_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_tumu( @@ -687,7 +687,7 @@ vint32m1_t 
test_vwmulsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_tumu( @@ -696,7 +696,7 @@ vint32m1_t test_vwmulsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulsu_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_tumu( @@ -705,7 +705,7 @@ vint32m2_t test_vwmulsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_tumu( @@ -714,7 +714,7 @@ vint32m2_t test_vwmulsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulsu_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_tumu( @@ -723,7 +723,7 @@ vint32m4_t test_vwmulsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - 
return vwmulsu_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_tumu( @@ -732,7 +732,7 @@ vint32m4_t test_vwmulsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulsu_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_tumu( @@ -741,7 +741,7 @@ vint32m8_t test_vwmulsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_tumu( @@ -750,7 +750,7 @@ vint32m8_t test_vwmulsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulsu_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_tumu( @@ -759,7 +759,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_tumu( @@ -768,7 +768,7 @@ vint64m1_t 
test_vwmulsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulsu_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_tumu( @@ -777,7 +777,7 @@ vint64m2_t test_vwmulsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_tumu( @@ -786,7 +786,7 @@ vint64m2_t test_vwmulsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulsu_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_tumu( @@ -795,7 +795,7 @@ vint64m4_t test_vwmulsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_tumu( @@ -804,7 +804,7 @@ vint64m4_t test_vwmulsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) 
{ - return vwmulsu_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_tumu( @@ -813,7 +813,7 @@ vint64m8_t test_vwmulsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_mu( @@ -822,7 +822,7 @@ vint64m8_t test_vwmulsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulsu_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_mu( @@ -831,7 +831,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_mu( @@ -840,7 +840,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulsu_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_mu( @@ -849,7 +849,7 @@ vint16mf2_t 
test_vwmulsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_mu( @@ -858,7 +858,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulsu_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_mu( @@ -867,7 +867,7 @@ vint16m1_t test_vwmulsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_mu( @@ -876,7 +876,7 @@ vint16m1_t test_vwmulsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulsu_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_mu( @@ -885,7 +885,7 @@ vint16m2_t test_vwmulsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m2_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_mu( @@ -894,7 +894,7 @@ vint16m2_t test_vwmulsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulsu_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_mu( @@ -903,7 +903,7 @@ vint16m4_t test_vwmulsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_mu( @@ -912,7 +912,7 @@ vint16m4_t test_vwmulsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulsu_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_mu( @@ -921,7 +921,7 @@ vint16m8_t test_vwmulsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulsu_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_mu( @@ -930,7 +930,7 @@ vint16m8_t test_vwmulsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4 // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulsu_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_mu( @@ -939,7 +939,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_mu( @@ -948,7 +948,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulsu_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_mu( @@ -957,7 +957,7 @@ vint32m1_t test_vwmulsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_mu( @@ -966,7 +966,7 @@ vint32m1_t test_vwmulsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulsu_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m2_mu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_mu( @@ -975,7 +975,7 @@ vint32m2_t test_vwmulsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_mu( @@ -984,7 +984,7 @@ vint32m2_t test_vwmulsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulsu_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_mu( @@ -993,7 +993,7 @@ vint32m4_t test_vwmulsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_mu( @@ -1002,7 +1002,7 @@ vint32m4_t test_vwmulsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulsu_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_mu( @@ -1011,7 +1011,7 @@ vint32m8_t test_vwmulsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vwmulsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulsu_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_mu( @@ -1020,7 +1020,7 @@ vint32m8_t test_vwmulsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulsu_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_mu( @@ -1029,7 +1029,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_mu( @@ -1038,7 +1038,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulsu_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_mu( @@ -1047,7 +1047,7 @@ vint64m2_t test_vwmulsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_mu( @@ -1056,7 +1056,7 @@ vint64m2_t test_vwmulsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulsu_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_mu( @@ -1065,7 +1065,7 @@ vint64m4_t test_vwmulsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_mu( @@ -1074,7 +1074,7 @@ vint64m4_t test_vwmulsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulsu_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_mu( @@ -1083,6 +1083,6 @@ vint64m8_t test_vwmulsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulsu_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulsu_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmulu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmulu.c index 2f2f79319ba4..ac2479ce6d4b 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmulu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwmulu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulu_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_tu( @@ -21,7 +21,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_tu( @@ -30,7 +30,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_tu( @@ -39,7 +39,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_tu( @@ -48,7 +48,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return 
vwmulu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_tu( @@ -57,7 +57,7 @@ vuint16m1_t test_vwmulu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_tu( @@ -66,7 +66,7 @@ vuint16m1_t test_vwmulu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_tu( @@ -75,7 +75,7 @@ vuint16m2_t test_vwmulu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_tu( @@ -84,7 +84,7 @@ vuint16m2_t test_vwmulu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_tu( @@ -93,7 +93,7 @@ vuint16m4_t test_vwmulu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t 
vl) { - return vwmulu_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_tu( @@ -102,7 +102,7 @@ vuint16m4_t test_vwmulu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_tu( @@ -111,7 +111,7 @@ vuint16m8_t test_vwmulu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_tu( @@ -120,7 +120,7 @@ vuint16m8_t test_vwmulu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_tu( @@ -129,7 +129,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_tu( @@ -138,7 +138,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1_tu(vuint32m1_t 
maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_tu( @@ -147,7 +147,7 @@ vuint32m1_t test_vwmulu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_tu( @@ -156,7 +156,7 @@ vuint32m1_t test_vwmulu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_tu( @@ -165,7 +165,7 @@ vuint32m2_t test_vwmulu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_tu( @@ -174,7 +174,7 @@ vuint32m2_t test_vwmulu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_tu( @@ -183,7 +183,7 @@ vuint32m4_t test_vwmulu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m4_t test_vwmulu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_tu( @@ -192,7 +192,7 @@ vuint32m4_t test_vwmulu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_tu( @@ -201,7 +201,7 @@ vuint32m8_t test_vwmulu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tu( @@ -210,7 +210,7 @@ vuint32m8_t test_vwmulu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tu( @@ -219,7 +219,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_tu( @@ -228,7 +228,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uin 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_tu( @@ -237,7 +237,7 @@ vuint64m2_t test_vwmulu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_tu( @@ -246,7 +246,7 @@ vuint64m2_t test_vwmulu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_tu( @@ -255,7 +255,7 @@ vuint64m4_t test_vwmulu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_tu( @@ -264,7 +264,7 @@ vuint64m4_t test_vwmulu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_tu( @@ -273,7 +273,7 @@ vuint64m8_t 
test_vwmulu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_tum( @@ -282,7 +282,7 @@ vuint64m8_t test_vwmulu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_tum( @@ -291,7 +291,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_tum( @@ -300,7 +300,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_tum( @@ -309,7 +309,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return 
vwmulu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_tum( @@ -318,7 +318,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_tum( @@ -327,7 +327,7 @@ vuint16m1_t test_vwmulu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_tum( @@ -336,7 +336,7 @@ vuint16m1_t test_vwmulu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_tum( @@ -345,7 +345,7 @@ vuint16m2_t test_vwmulu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_tum( @@ -354,7 +354,7 @@ vuint16m2_t test_vwmulu_vx_u16m2_tum(vbool8_t mask, 
vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_tum( @@ -363,7 +363,7 @@ vuint16m4_t test_vwmulu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_tum( @@ -372,7 +372,7 @@ vuint16m4_t test_vwmulu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_tum( @@ -381,7 +381,7 @@ vuint16m8_t test_vwmulu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_tum( @@ -390,7 +390,7 @@ vuint16m8_t test_vwmulu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + 
return __riscv_vwmulu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_tum( @@ -399,7 +399,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_tum( @@ -408,7 +408,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_tum( @@ -417,7 +417,7 @@ vuint32m1_t test_vwmulu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_tum( @@ -426,7 +426,7 @@ vuint32m1_t test_vwmulu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_tum( @@ -435,7 +435,7 @@ vuint32m2_t test_vwmulu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_tum( @@ -444,7 +444,7 @@ vuint32m2_t test_vwmulu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_tum( @@ -453,7 +453,7 @@ vuint32m4_t test_vwmulu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_tum( @@ -462,7 +462,7 @@ vuint32m4_t test_vwmulu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_tum( @@ -471,7 +471,7 @@ vuint32m8_t test_vwmulu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwmulu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tum( @@ -480,7 +480,7 @@ vuint32m8_t test_vwmulu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tum( @@ -489,7 +489,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_tum( @@ -498,7 +498,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_tum( @@ -507,7 +507,7 @@ vuint64m2_t test_vwmulu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_tum( @@ -516,7 +516,7 @@ vuint64m2_t test_vwmulu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_tum( @@ -525,7 +525,7 @@ vuint64m4_t test_vwmulu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_tum( @@ -534,7 +534,7 @@ vuint64m4_t test_vwmulu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_tum( @@ -543,7 +543,7 @@ vuint64m8_t test_vwmulu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_tumu( @@ -552,7 +552,7 @@ vuint64m8_t test_vwmulu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwmulu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_tumu( @@ -561,7 +561,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_tumu( @@ -570,7 +570,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_tumu( @@ -579,7 +579,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_tumu( @@ -588,7 +588,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_tumu( @@ -597,7 +597,7 @@ vuint16m1_t test_vwmulu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_tumu( @@ -606,7 +606,7 @@ vuint16m1_t test_vwmulu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_tumu( @@ -615,7 +615,7 @@ vuint16m2_t test_vwmulu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_tumu( @@ -624,7 +624,7 @@ vuint16m2_t test_vwmulu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_tumu( @@ -633,7 +633,7 @@ vuint16m4_t test_vwmulu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwmulu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_tumu( @@ -642,7 +642,7 @@ vuint16m4_t test_vwmulu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_tumu( @@ -651,7 +651,7 @@ vuint16m8_t test_vwmulu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_tumu( @@ -660,7 +660,7 @@ vuint16m8_t test_vwmulu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_tumu( @@ -669,7 +669,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_tumu( @@ -678,7 +678,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_tumu( @@ -687,7 +687,7 @@ vuint32m1_t test_vwmulu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_tumu( @@ -696,7 +696,7 @@ vuint32m1_t test_vwmulu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_tumu( @@ -705,7 +705,7 @@ vuint32m2_t test_vwmulu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_tumu( @@ -714,7 +714,7 @@ vuint32m2_t test_vwmulu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwmulu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_tumu( @@ -723,7 +723,7 @@ vuint32m4_t test_vwmulu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_tumu( @@ -732,7 +732,7 @@ vuint32m4_t test_vwmulu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_tumu( @@ -741,7 +741,7 @@ vuint32m8_t test_vwmulu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_tumu( @@ -750,7 +750,7 @@ vuint32m8_t test_vwmulu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_tumu( @@ -759,7 +759,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_tumu( @@ -768,7 +768,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_tumu( @@ -777,7 +777,7 @@ vuint64m2_t test_vwmulu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_tumu( @@ -786,7 +786,7 @@ vuint64m2_t test_vwmulu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_tumu( @@ -795,7 +795,7 @@ vuint64m4_t test_vwmulu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwmulu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_tumu( @@ -804,7 +804,7 @@ vuint64m4_t test_vwmulu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_tumu( @@ -813,7 +813,7 @@ vuint64m8_t test_vwmulu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_mu( @@ -822,7 +822,7 @@ vuint64m8_t test_vwmulu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_mu( @@ -831,7 +831,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_mu( @@ -840,7 +840,7 @@ vuint16mf4_t test_vwmulu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_mu( @@ -849,7 +849,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_mu( @@ -858,7 +858,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_mu( @@ -867,7 +867,7 @@ vuint16m1_t test_vwmulu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_mu( @@ -876,7 +876,7 @@ vuint16m1_t test_vwmulu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m2_mu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_mu( @@ -885,7 +885,7 @@ vuint16m2_t test_vwmulu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_mu( @@ -894,7 +894,7 @@ vuint16m2_t test_vwmulu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_mu( @@ -903,7 +903,7 @@ vuint16m4_t test_vwmulu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_mu( @@ -912,7 +912,7 @@ vuint16m4_t test_vwmulu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_mu( @@ -921,7 +921,7 @@ vuint16m8_t test_vwmulu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, 
vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_mu( @@ -930,7 +930,7 @@ vuint16m8_t test_vwmulu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_mu( @@ -939,7 +939,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_mu( @@ -948,7 +948,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_mu( @@ -957,7 +957,7 @@ vuint32m1_t test_vwmulu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_mu( @@ -966,7 +966,7 @@ 
vuint32m1_t test_vwmulu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_mu( @@ -975,7 +975,7 @@ vuint32m2_t test_vwmulu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_mu( @@ -984,7 +984,7 @@ vuint32m2_t test_vwmulu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_mu( @@ -993,7 +993,7 @@ vuint32m4_t test_vwmulu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_mu( @@ -1002,7 +1002,7 @@ vuint32m4_t test_vwmulu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return 
vwmulu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_mu( @@ -1011,7 +1011,7 @@ vuint32m8_t test_vwmulu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_mu( @@ -1020,7 +1020,7 @@ vuint32m8_t test_vwmulu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_mu( @@ -1029,7 +1029,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_mu( @@ -1038,7 +1038,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_mu( @@ -1047,7 +1047,7 @@ vuint64m2_t test_vwmulu_vv_u64m2_mu(vbool32_t mask, 
vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_mu( @@ -1056,7 +1056,7 @@ vuint64m2_t test_vwmulu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_mu( @@ -1065,7 +1065,7 @@ vuint64m4_t test_vwmulu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_mu( @@ -1074,7 +1074,7 @@ vuint64m4_t test_vwmulu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwmulu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_mu( @@ -1083,6 +1083,6 @@ vuint64m8_t test_vwmulu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwmulu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwredsum.c index d944afb1b793..bf61fb2984bc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwredsum.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tu(vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf8_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8mf8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_tu( @@ -21,7 +21,7 @@ vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tu(vint16m1_t maskedoff, vint8mf8_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tu(vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8mf4_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_tu( @@ -30,7 +30,7 @@ vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tu(vint16m1_t maskedoff, vint8mf4_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf2_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8mf2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_tu( @@ -39,7 +39,7 @@ vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m1_i16m1_tu(vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { 
- return vwredsum_vs_i8m1_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8m1_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_tu( @@ -48,7 +48,7 @@ vint16m1_t test_vwredsum_vs_i8m1_i16m1_tu(vint16m1_t maskedoff, vint8m1_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m2_i16m1_tu(vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m2_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8m2_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_tu( @@ -57,7 +57,7 @@ vint16m1_t test_vwredsum_vs_i8m2_i16m1_tu(vint16m1_t maskedoff, vint8m2_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m4_i16m1_tu(vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m4_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8m4_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_tu( @@ -66,7 +66,7 @@ vint16m1_t test_vwredsum_vs_i8m4_i16m1_tu(vint16m1_t maskedoff, vint8m4_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m8_i16m1_tu(vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m8_i16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8m8_i16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_tu( @@ -75,7 +75,7 @@ vint16m1_t test_vwredsum_vs_i8m8_i16m1_tu(vint16m1_t maskedoff, vint8m8_t vector // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tu(vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf4_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i16mf4_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_tu( 
@@ -84,7 +84,7 @@ vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tu(vint32m1_t maskedoff, vint16mf4_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i16mf2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_tu( @@ -93,7 +93,7 @@ vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m1_i32m1_tu(vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m1_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i16m1_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_tu( @@ -102,7 +102,7 @@ vint32m1_t test_vwredsum_vs_i16m1_i32m1_tu(vint32m1_t maskedoff, vint16m1_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m2_i32m1_tu(vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m2_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i16m2_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_tu( @@ -111,7 +111,7 @@ vint32m1_t test_vwredsum_vs_i16m2_i32m1_tu(vint32m1_t maskedoff, vint16m2_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m4_i32m1_tu(vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m4_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i16m4_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_tu( @@ -120,7 +120,7 @@ vint32m1_t test_vwredsum_vs_i16m4_i32m1_tu(vint32m1_t maskedoff, vint16m4_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vwredsum_vs_i16m8_i32m1_tu(vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m8_i32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i16m8_i32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tu( @@ -129,7 +129,7 @@ vint32m1_t test_vwredsum_vs_i16m8_i32m1_tu(vint32m1_t maskedoff, vint16m8_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32mf2_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i32mf2_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_tu( @@ -138,7 +138,7 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m1_i64m1_tu(vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m1_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i32m1_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_tu( @@ -147,7 +147,7 @@ vint64m1_t test_vwredsum_vs_i32m1_i64m1_tu(vint64m1_t maskedoff, vint32m1_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m2_i64m1_tu(vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m2_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i32m2_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_tu( @@ -156,7 +156,7 @@ vint64m1_t test_vwredsum_vs_i32m2_i64m1_tu(vint64m1_t maskedoff, vint32m2_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m4_i64m1_tu(vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m4_i64m1_tu(maskedoff, vector, scalar, vl); + 
return __riscv_vwredsum_vs_i32m4_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_tu( @@ -165,7 +165,7 @@ vint64m1_t test_vwredsum_vs_i32m4_i64m1_tu(vint64m1_t maskedoff, vint32m4_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m8_i64m1_tu(vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m8_i64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i32m8_i64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_tum( @@ -174,7 +174,7 @@ vint64m1_t test_vwredsum_vs_i32m8_i64m1_tu(vint64m1_t maskedoff, vint32m8_t vect // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf8_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8mf8_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_tum( @@ -183,7 +183,7 @@ vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_tum( @@ -192,7 +192,7 @@ vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_tum( @@ -201,7 +201,7 @@ vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m1_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_tum( @@ -210,7 +210,7 @@ vint16m1_t test_vwredsum_vs_i8m1_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m2_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_tum( @@ -219,7 +219,7 @@ vint16m1_t test_vwredsum_vs_i8m2_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m4_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_tum( @@ -228,7 +228,7 @@ vint16m1_t test_vwredsum_vs_i8m4_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m8_i16m1_tum(vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return vwredsum_vs_i8m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i8m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_tum( @@ -237,7 +237,7 @@ 
vint16m1_t test_vwredsum_vs_i8m8_i16m1_tum(vbool1_t mask, vint16m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf4_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i16mf4_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_tum( @@ -246,7 +246,7 @@ vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tum(vbool64_t mask, vint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i16mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_tum( @@ -255,7 +255,7 @@ vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tum(vbool32_t mask, vint32m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m1_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i16m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_tum( @@ -264,7 +264,7 @@ vint32m1_t test_vwredsum_vs_i16m1_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m2_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i16m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_tum( @@ -273,7 +273,7 @@ vint32m1_t 
test_vwredsum_vs_i16m2_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m4_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i16m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_tum( @@ -282,7 +282,7 @@ vint32m1_t test_vwredsum_vs_i16m4_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m8_i32m1_tum(vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return vwredsum_vs_i16m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i16m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tum( @@ -291,7 +291,7 @@ vint32m1_t test_vwredsum_vs_i16m8_i32m1_tum(vbool2_t mask, vint32m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32mf2_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i32mf2_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_tum( @@ -300,7 +300,7 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t mask, vint64m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m1_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i32m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_tum( @@ -309,7 +309,7 @@ vint64m1_t test_vwredsum_vs_i32m1_i64m1_tum(vbool32_t mask, 
vint64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m2_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i32m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_tum( @@ -318,7 +318,7 @@ vint64m1_t test_vwredsum_vs_i32m2_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m4_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i32m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_tum( @@ -327,6 +327,6 @@ vint64m1_t test_vwredsum_vs_i32m4_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m8_i64m1_tum(vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return vwredsum_vs_i32m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsum_vs_i32m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwredsumu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwredsumu.c index dc063a125278..72eebb173f00 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwredsumu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwredsumu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tu(vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf8_u16m1_tu(maskedoff, vector, scalar, vl); + return 
__riscv_vwredsumu_vs_u8mf8_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_tu( @@ -21,7 +21,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tu(vuint16m1_t maskedoff, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tu(vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf4_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8mf4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_tu( @@ -30,7 +30,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tu(vuint16m1_t maskedoff, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8mf2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_tu( @@ -39,7 +39,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tu(vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m1_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m1_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_tu( @@ -48,7 +48,7 @@ vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tu(vuint16m1_t maskedoff, vuint8m1_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tu(vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m2_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m2_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_tu( @@ -57,7 +57,7 @@ vuint16m1_t 
test_vwredsumu_vs_u8m2_u16m1_tu(vuint16m1_t maskedoff, vuint8m2_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tu(vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m4_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m4_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_tu( @@ -66,7 +66,7 @@ vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tu(vuint16m1_t maskedoff, vuint8m4_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tu(vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m8_u16m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m8_u16m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_tu( @@ -75,7 +75,7 @@ vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tu(vuint16m1_t maskedoff, vuint8m8_t ve // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tu(vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf4_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16mf4_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_tu( @@ -84,7 +84,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tu(vuint32m1_t maskedoff, vuint16mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16mf2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_tu( @@ -93,7 +93,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vwredsumu_vs_u16m1_u32m1_tu(vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m1_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m1_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_tu( @@ -102,7 +102,7 @@ vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tu(vuint32m1_t maskedoff, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tu(vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m2_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m2_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_tu( @@ -111,7 +111,7 @@ vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tu(vuint32m1_t maskedoff, vuint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tu(vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m4_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m4_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_tu( @@ -120,7 +120,7 @@ vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tu(vuint32m1_t maskedoff, vuint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tu(vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m8_u32m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m8_u32m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tu( @@ -129,7 +129,7 @@ vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tu(vuint32m1_t maskedoff, vuint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return 
vwredsumu_vs_u32mf2_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32mf2_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_tu( @@ -138,7 +138,7 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tu(vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m1_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m1_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_tu( @@ -147,7 +147,7 @@ vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tu(vuint64m1_t maskedoff, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tu(vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m2_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m2_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_tu( @@ -156,7 +156,7 @@ vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tu(vuint64m1_t maskedoff, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tu(vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m4_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m4_u64m1_tu(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_tu( @@ -165,7 +165,7 @@ vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tu(vuint64m1_t maskedoff, vuint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tu(vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m8_u64m1_tu(maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m8_u64m1_tu(maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_tum( @@ -174,7 +174,7 @@ vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tu(vuint64m1_t maskedoff, vuint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf8_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8mf8_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_tum( @@ -183,7 +183,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tum(vbool64_t mask, vuint16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_tum( @@ -192,7 +192,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tum(vbool32_t mask, vuint16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_tum( @@ -201,7 +201,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tum(vbool16_t mask, vuint16m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
@test_vwredsumu_vs_u8m2_u16m1_tum( @@ -210,7 +210,7 @@ vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tum(vbool8_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_tum( @@ -219,7 +219,7 @@ vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tum(vbool4_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_tum( @@ -228,7 +228,7 @@ vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tum(vbool2_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tum(vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return vwredsumu_vs_u8m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u8m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_tum( @@ -237,7 +237,7 @@ vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tum(vbool1_t mask, vuint16m1_t maskedof // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf4_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16mf4_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_tum( 
@@ -246,7 +246,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tum(vbool64_t mask, vuint32m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_tum( @@ -255,7 +255,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tum(vbool32_t mask, vuint32m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_tum( @@ -264,7 +264,7 @@ vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tum(vbool16_t mask, vuint32m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_tum( @@ -273,7 +273,7 @@ vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tum(vbool8_t mask, vuint32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_tum( @@ -282,7 +282,7 @@ 
vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tum(vbool4_t mask, vuint32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tum(vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return vwredsumu_vs_u16m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u16m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tum( @@ -291,7 +291,7 @@ vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tum(vbool2_t mask, vuint32m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32mf2_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32mf2_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_tum( @@ -300,7 +300,7 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t maske // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_tum( @@ -309,7 +309,7 @@ vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tum(vbool32_t mask, vuint64m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_tum( @@ -318,7 +318,7 @@ vuint64m1_t 
test_vwredsumu_vs_u32m2_u64m1_tum(vbool16_t mask, vuint64m1_t masked // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_tum( @@ -327,6 +327,6 @@ vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tum(vbool8_t mask, vuint64m1_t maskedo // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tum(vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return vwredsumu_vs_u32m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); + return __riscv_vwredsumu_vs_u32m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwsub.c index 105e73b8b7a7..7314a192cc3b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwsub.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_tu( @@ -21,7 +21,7 @@ vint16mf4_t test_vwsub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwsub_wv_i16mf4_tu( @@ -30,7 +30,7 @@ vint16mf4_t test_vwsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_wv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_tu( @@ -39,7 +39,7 @@ vint16mf4_t test_vwsub_wv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_tu( @@ -48,7 +48,7 @@ vint16mf4_t test_vwsub_wx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_tu( @@ -57,7 +57,7 @@ vint16mf2_t test_vwsub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_tu( @@ -66,7 +66,7 @@ vint16mf2_t test_vwsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_wv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16mf2_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_tu( @@ -75,7 +75,7 @@ vint16mf2_t test_vwsub_wv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_tu( @@ -84,7 +84,7 @@ vint16mf2_t test_vwsub_wx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_tu( @@ -93,7 +93,7 @@ vint16m1_t test_vwsub_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_tu( @@ -102,7 +102,7 @@ vint16m1_t test_vwsub_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_wv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_tu( @@ -111,7 +111,7 @@ vint16m1_t test_vwsub_wv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m1_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_tu( @@ -120,7 +120,7 @@ vint16m1_t test_vwsub_wx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwsub_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_tu( @@ -129,7 +129,7 @@ vint16m2_t test_vwsub_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_tu( @@ -138,7 +138,7 @@ vint16m2_t test_vwsub_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwsub_wv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_tu( @@ -147,7 +147,7 @@ vint16m2_t test_vwsub_wv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_tu( @@ -156,7 +156,7 @@ vint16m2_t test_vwsub_wx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwsub_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m4_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_tu( @@ -165,7 +165,7 @@ vint16m4_t test_vwsub_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_tu( @@ -174,7 +174,7 @@ vint16m4_t test_vwsub_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwsub_wv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_tu( @@ -183,7 +183,7 @@ vint16m4_t test_vwsub_wv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_tu( @@ -192,7 +192,7 @@ vint16m4_t test_vwsub_wx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwsub_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_tu( @@ -201,7 +201,7 @@ vint16m8_t test_vwsub_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m8_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_tu( @@ -210,7 +210,7 @@ vint16m8_t test_vwsub_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwsub_wv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_tu( @@ -219,7 +219,7 @@ vint16m8_t test_vwsub_wv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_tu( @@ -228,7 +228,7 @@ vint16m8_t test_vwsub_wx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_tu( @@ -237,7 +237,7 @@ vint32mf2_t test_vwsub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_tu( @@ -246,7 +246,7 @@ vint32mf2_t test_vwsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_wv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32mf2_tu(maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vwsub_wv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vwsub_wx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vwsub_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vwsub_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_wv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_tu( @@ -291,7 +291,7 @@ vint32m1_t test_vwsub_wv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m1_tu(maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_tu( @@ -300,7 +300,7 @@ vint32m1_t test_vwsub_wx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwsub_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_tu( @@ -309,7 +309,7 @@ vint32m2_t test_vwsub_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_tu( @@ -318,7 +318,7 @@ vint32m2_t test_vwsub_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwsub_wv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_tu( @@ -327,7 +327,7 @@ vint32m2_t test_vwsub_wv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_tu( @@ -336,7 +336,7 @@ vint32m2_t test_vwsub_wx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwsub_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m4_tu(maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_tu( @@ -345,7 +345,7 @@ vint32m4_t test_vwsub_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_tu( @@ -354,7 +354,7 @@ vint32m4_t test_vwsub_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwsub_wv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_tu( @@ -363,7 +363,7 @@ vint32m4_t test_vwsub_wv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_tu( @@ -372,7 +372,7 @@ vint32m4_t test_vwsub_wx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwsub_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_tu( @@ -381,7 +381,7 @@ vint32m8_t test_vwsub_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m8_tu(maskedoff, op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_tu( @@ -390,7 +390,7 @@ vint32m8_t test_vwsub_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwsub_wv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_tu( @@ -399,7 +399,7 @@ vint32m8_t test_vwsub_wv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tu( @@ -408,7 +408,7 @@ vint32m8_t test_vwsub_wx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tu( @@ -417,7 +417,7 @@ vint64m1_t test_vwsub_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tu( @@ -426,7 +426,7 @@ vint64m1_t test_vwsub_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_wv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m1_tu(maskedoff, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tu( @@ -435,7 +435,7 @@ vint64m1_t test_vwsub_wv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_tu( @@ -444,7 +444,7 @@ vint64m1_t test_vwsub_wx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwsub_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_tu( @@ -453,7 +453,7 @@ vint64m2_t test_vwsub_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_tu( @@ -462,7 +462,7 @@ vint64m2_t test_vwsub_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwsub_wv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_tu( @@ -471,7 +471,7 @@ vint64m2_t test_vwsub_wv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m2_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_tu( @@ -480,7 +480,7 @@ vint64m2_t test_vwsub_wx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwsub_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_tu( @@ -489,7 +489,7 @@ vint64m4_t test_vwsub_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_tu( @@ -498,7 +498,7 @@ vint64m4_t test_vwsub_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwsub_wv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_tu( @@ -507,7 +507,7 @@ vint64m4_t test_vwsub_wv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_tu( @@ -516,7 +516,7 @@ vint64m4_t test_vwsub_wx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwsub_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m8_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_tu( @@ -525,7 +525,7 @@ vint64m8_t test_vwsub_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_tu( @@ -534,7 +534,7 @@ vint64m8_t test_vwsub_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwsub_wv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_tu( @@ -543,7 +543,7 @@ vint64m8_t test_vwsub_wv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_tum( @@ -552,7 +552,7 @@ vint64m8_t test_vwsub_wx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_tum( @@ -561,7 +561,7 @@ vint16mf4_t test_vwsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwsub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_tum( @@ -570,7 +570,7 @@ vint16mf4_t test_vwsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_wv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_tum( @@ -579,7 +579,7 @@ vint16mf4_t test_vwsub_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_tum( @@ -588,7 +588,7 @@ vint16mf4_t test_vwsub_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_tum( @@ -597,7 +597,7 @@ vint16mf2_t test_vwsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_tum( @@ -606,7 +606,7 @@ vint16mf2_t test_vwsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16mf2_t test_vwsub_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_wv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_tum( @@ -615,7 +615,7 @@ vint16mf2_t test_vwsub_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_tum( @@ -624,7 +624,7 @@ vint16mf2_t test_vwsub_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_tum( @@ -633,7 +633,7 @@ vint16m1_t test_vwsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_tum( @@ -642,7 +642,7 @@ vint16m1_t test_vwsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_wv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_tum( @@ -651,7 +651,7 @@ vint16m1_t test_vwsub_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_tum( @@ -660,7 +660,7 @@ vint16m1_t test_vwsub_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwsub_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_tum( @@ -669,7 +669,7 @@ vint16m2_t test_vwsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_tum( @@ -678,7 +678,7 @@ vint16m2_t test_vwsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwsub_wv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_tum( @@ -687,7 +687,7 @@ vint16m2_t test_vwsub_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, 
size_t vl) { - return vwsub_wx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_tum( @@ -696,7 +696,7 @@ vint16m2_t test_vwsub_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwsub_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_tum( @@ -705,7 +705,7 @@ vint16m4_t test_vwsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_tum( @@ -714,7 +714,7 @@ vint16m4_t test_vwsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwsub_wv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_tum( @@ -723,7 +723,7 @@ vint16m4_t test_vwsub_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_tum( @@ -732,7 +732,7 @@ vint16m4_t test_vwsub_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, 
vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwsub_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_tum( @@ -741,7 +741,7 @@ vint16m8_t test_vwsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_tum( @@ -750,7 +750,7 @@ vint16m8_t test_vwsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwsub_wv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_tum( @@ -759,7 +759,7 @@ vint16m8_t test_vwsub_wv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_tum( @@ -768,7 +768,7 @@ vint16m8_t test_vwsub_wx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32mf2_tum(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_tum( @@ -777,7 +777,7 @@ vint32mf2_t test_vwsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_tum( @@ -786,7 +786,7 @@ vint32mf2_t test_vwsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_wv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_tum( @@ -795,7 +795,7 @@ vint32mf2_t test_vwsub_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_tum( @@ -804,7 +804,7 @@ vint32mf2_t test_vwsub_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_tum( @@ -813,7 +813,7 @@ vint32m1_t test_vwsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1_tum(vbool32_t mask, 
vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_tum( @@ -822,7 +822,7 @@ vint32m1_t test_vwsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_wv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_tum( @@ -831,7 +831,7 @@ vint32m1_t test_vwsub_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_tum( @@ -840,7 +840,7 @@ vint32m1_t test_vwsub_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwsub_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_tum( @@ -849,7 +849,7 @@ vint32m2_t test_vwsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_tum( @@ -858,7 +858,7 @@ vint32m2_t 
test_vwsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwsub_wv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_tum( @@ -867,7 +867,7 @@ vint32m2_t test_vwsub_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_tum( @@ -876,7 +876,7 @@ vint32m2_t test_vwsub_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwsub_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_tum( @@ -885,7 +885,7 @@ vint32m4_t test_vwsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_tum( @@ -894,7 +894,7 @@ vint32m4_t test_vwsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwsub_wv_i32m4_tum(mask, maskedoff, op1, op2, 
vl); + return __riscv_vwsub_wv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_tum( @@ -903,7 +903,7 @@ vint32m4_t test_vwsub_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_tum( @@ -912,7 +912,7 @@ vint32m4_t test_vwsub_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwsub_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_tum( @@ -921,7 +921,7 @@ vint32m8_t test_vwsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_tum( @@ -930,7 +930,7 @@ vint32m8_t test_vwsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwsub_wv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_tum( @@ -939,7 +939,7 @@ vint32m8_t test_vwsub_wv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vwsub_wx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tum( @@ -948,7 +948,7 @@ vint32m8_t test_vwsub_wx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tum( @@ -957,7 +957,7 @@ vint64m1_t test_vwsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tum( @@ -966,7 +966,7 @@ vint64m1_t test_vwsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_wv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tum( @@ -975,7 +975,7 @@ vint64m1_t test_vwsub_wv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwsub_vv_i64m2_tum( @@ -984,7 +984,7 @@ vint64m1_t test_vwsub_wx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwsub_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_tum( @@ -993,7 +993,7 @@ vint64m2_t test_vwsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_tum( @@ -1002,7 +1002,7 @@ vint64m2_t test_vwsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwsub_wv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_tum( @@ -1011,7 +1011,7 @@ vint64m2_t test_vwsub_wv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_tum( @@ -1020,7 +1020,7 @@ vint64m2_t test_vwsub_wx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, 
size_t vl) { - return vwsub_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_tum( @@ -1029,7 +1029,7 @@ vint64m4_t test_vwsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_tum( @@ -1038,7 +1038,7 @@ vint64m4_t test_vwsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwsub_wv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_tum( @@ -1047,7 +1047,7 @@ vint64m4_t test_vwsub_wv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_tum( @@ -1056,7 +1056,7 @@ vint64m4_t test_vwsub_wx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwsub_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_tum( @@ -1065,7 +1065,7 @@ vint64m8_t test_vwsub_vv_i64m8_tum(vbool8_t mask, 
vint64m8_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_tum( @@ -1074,7 +1074,7 @@ vint64m8_t test_vwsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwsub_wv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_tum( @@ -1083,7 +1083,7 @@ vint64m8_t test_vwsub_wv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_tumu( @@ -1092,7 +1092,7 @@ vint64m8_t test_vwsub_wx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_tumu( @@ -1101,7 +1101,7 @@ vint16mf4_t test_vwsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwsub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_tumu( @@ -1110,7 +1110,7 @@ vint16mf4_t test_vwsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_wv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_tumu( @@ -1119,7 +1119,7 @@ vint16mf4_t test_vwsub_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_tumu( @@ -1128,7 +1128,7 @@ vint16mf4_t test_vwsub_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_tumu( @@ -1137,7 +1137,7 @@ vint16mf2_t test_vwsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_tumu( @@ -1146,7 +1146,7 @@ vint16mf2_t test_vwsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_wv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_tumu( @@ -1155,7 +1155,7 @@ vint16mf2_t test_vwsub_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_tumu( @@ -1164,7 +1164,7 @@ vint16mf2_t test_vwsub_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_tumu( @@ -1173,7 +1173,7 @@ vint16m1_t test_vwsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_tumu( @@ -1182,7 +1182,7 @@ vint16m1_t test_vwsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_wv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwsub_wv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_tumu( @@ -1191,7 +1191,7 @@ vint16m1_t test_vwsub_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_tumu( @@ -1200,7 +1200,7 @@ vint16m1_t test_vwsub_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwsub_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_tumu( @@ -1209,7 +1209,7 @@ vint16m2_t test_vwsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_tumu( @@ -1218,7 +1218,7 @@ vint16m2_t test_vwsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwsub_wv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_tumu( @@ -1227,7 +1227,7 @@ vint16m2_t test_vwsub_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vwsub_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_tumu( @@ -1236,7 +1236,7 @@ vint16m2_t test_vwsub_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwsub_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_tumu( @@ -1245,7 +1245,7 @@ vint16m4_t test_vwsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_tumu( @@ -1254,7 +1254,7 @@ vint16m4_t test_vwsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwsub_wv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_tumu( @@ -1263,7 +1263,7 @@ vint16m4_t test_vwsub_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwsub_vv_i16m8_tumu( @@ -1272,7 +1272,7 @@ vint16m4_t test_vwsub_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwsub_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_tumu( @@ -1281,7 +1281,7 @@ vint16m8_t test_vwsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_tumu( @@ -1290,7 +1290,7 @@ vint16m8_t test_vwsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwsub_wv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_tumu( @@ -1299,7 +1299,7 @@ vint16m8_t test_vwsub_wv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_tumu( @@ -1308,7 +1308,7 @@ vint16m8_t test_vwsub_wx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, 
vint16mf4_t op2, size_t vl) { - return vwsub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_tumu( @@ -1317,7 +1317,7 @@ vint32mf2_t test_vwsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_tumu( @@ -1326,7 +1326,7 @@ vint32mf2_t test_vwsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_wv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_tumu( @@ -1335,7 +1335,7 @@ vint32mf2_t test_vwsub_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_tumu( @@ -1344,7 +1344,7 @@ vint32mf2_t test_vwsub_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vin // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_tumu( @@ -1353,7 
+1353,7 @@ vint32m1_t test_vwsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_tumu( @@ -1362,7 +1362,7 @@ vint32m1_t test_vwsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_wv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_tumu( @@ -1371,7 +1371,7 @@ vint32m1_t test_vwsub_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_tumu( @@ -1380,7 +1380,7 @@ vint32m1_t test_vwsub_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwsub_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_tumu( @@ -1389,7 +1389,7 @@ vint32m2_t test_vwsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - 
return vwsub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_tumu( @@ -1398,7 +1398,7 @@ vint32m2_t test_vwsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwsub_wv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_tumu( @@ -1407,7 +1407,7 @@ vint32m2_t test_vwsub_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_tumu( @@ -1416,7 +1416,7 @@ vint32m2_t test_vwsub_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwsub_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_tumu( @@ -1425,7 +1425,7 @@ vint32m4_t test_vwsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_tumu( @@ -1434,7 +1434,7 @@ vint32m4_t test_vwsub_vx_i32m4_tumu(vbool8_t mask, 
vint32m4_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwsub_wv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_tumu( @@ -1443,7 +1443,7 @@ vint32m4_t test_vwsub_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_tumu( @@ -1452,7 +1452,7 @@ vint32m4_t test_vwsub_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwsub_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_tumu( @@ -1461,7 +1461,7 @@ vint32m8_t test_vwsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_tumu( @@ -1470,7 +1470,7 @@ vint32m8_t test_vwsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwsub_wv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwsub_wv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_tumu( @@ -1479,7 +1479,7 @@ vint32m8_t test_vwsub_wv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_tumu( @@ -1488,7 +1488,7 @@ vint32m8_t test_vwsub_wx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_tumu( @@ -1497,7 +1497,7 @@ vint64m1_t test_vwsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_tumu( @@ -1506,7 +1506,7 @@ vint64m1_t test_vwsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_wv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_tumu( @@ -1515,7 +1515,7 @@ vint64m1_t test_vwsub_wv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m1_t test_vwsub_wx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_tumu( @@ -1524,7 +1524,7 @@ vint64m1_t test_vwsub_wx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwsub_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_tumu( @@ -1533,7 +1533,7 @@ vint64m2_t test_vwsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_tumu( @@ -1542,7 +1542,7 @@ vint64m2_t test_vwsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwsub_wv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_tumu( @@ -1551,7 +1551,7 @@ vint64m2_t test_vwsub_wv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_tumu( @@ -1560,7 +1560,7 @@ vint64m2_t test_vwsub_wx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwsub_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_tumu( @@ -1569,7 +1569,7 @@ vint64m4_t test_vwsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_tumu( @@ -1578,7 +1578,7 @@ vint64m4_t test_vwsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwsub_wv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_tumu( @@ -1587,7 +1587,7 @@ vint64m4_t test_vwsub_wv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_tumu( @@ -1596,7 +1596,7 @@ vint64m4_t test_vwsub_wx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t 
maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwsub_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_tumu( @@ -1605,7 +1605,7 @@ vint64m8_t test_vwsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_tumu( @@ -1614,7 +1614,7 @@ vint64m8_t test_vwsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwsub_wv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_tumu( @@ -1623,7 +1623,7 @@ vint64m8_t test_vwsub_wv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_mu( @@ -1632,7 +1632,7 @@ vint64m8_t test_vwsub_wx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_mu( @@ -1641,7 +1641,7 @@ 
vint16mf4_t test_vwsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_mu( @@ -1650,7 +1650,7 @@ vint16mf4_t test_vwsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwsub_wv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_mu( @@ -1659,7 +1659,7 @@ vint16mf4_t test_vwsub_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_mu( @@ -1668,7 +1668,7 @@ vint16mf4_t test_vwsub_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_mu( @@ -1677,7 +1677,7 @@ vint16mf2_t test_vwsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return 
vwsub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_mu( @@ -1686,7 +1686,7 @@ vint16mf2_t test_vwsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwsub_wv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_mu( @@ -1695,7 +1695,7 @@ vint16mf2_t test_vwsub_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_mu( @@ -1704,7 +1704,7 @@ vint16mf2_t test_vwsub_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_mu( @@ -1713,7 +1713,7 @@ vint16m1_t test_vwsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_mu( @@ -1722,7 +1722,7 @@ vint16m1_t test_vwsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2 // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_wv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_mu( @@ -1731,7 +1731,7 @@ vint16m1_t test_vwsub_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_mu( @@ -1740,7 +1740,7 @@ vint16m1_t test_vwsub_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwsub_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_mu( @@ -1749,7 +1749,7 @@ vint16m2_t test_vwsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_mu( @@ -1758,7 +1758,7 @@ vint16m2_t test_vwsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwsub_wv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m2_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_mu( @@ -1767,7 +1767,7 @@ vint16m2_t test_vwsub_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_mu( @@ -1776,7 +1776,7 @@ vint16m2_t test_vwsub_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwsub_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_mu( @@ -1785,7 +1785,7 @@ vint16m4_t test_vwsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_mu( @@ -1794,7 +1794,7 @@ vint16m4_t test_vwsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwsub_wv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_mu( @@ -1803,7 +1803,7 @@ vint16m4_t test_vwsub_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - 
return vwsub_wx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_mu( @@ -1812,7 +1812,7 @@ vint16m4_t test_vwsub_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwsub_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_mu( @@ -1821,7 +1821,7 @@ vint16m8_t test_vwsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwsub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_mu( @@ -1830,7 +1830,7 @@ vint16m8_t test_vwsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwsub_wv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_mu( @@ -1839,7 +1839,7 @@ vint16m8_t test_vwsub_wv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return vwsub_wx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_mu( @@ -1848,7 +1848,7 @@ vint16m8_t test_vwsub_wx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_mu( @@ -1857,7 +1857,7 @@ vint32mf2_t test_vwsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_mu( @@ -1866,7 +1866,7 @@ vint32mf2_t test_vwsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_wv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_mu( @@ -1875,7 +1875,7 @@ vint32mf2_t test_vwsub_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_mu( @@ -1884,7 +1884,7 @@ vint32mf2_t test_vwsub_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m1_mu(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_mu( @@ -1893,7 +1893,7 @@ vint32m1_t test_vwsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_mu( @@ -1902,7 +1902,7 @@ vint32m1_t test_vwsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_wv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_mu( @@ -1911,7 +1911,7 @@ vint32m1_t test_vwsub_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_mu( @@ -1920,7 +1920,7 @@ vint32m1_t test_vwsub_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwsub_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_mu( @@ -1929,7 +1929,7 @@ vint32m2_t test_vwsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, 
int16_t op2, size_t vl) { - return vwsub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_mu( @@ -1938,7 +1938,7 @@ vint32m2_t test_vwsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwsub_wv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_mu( @@ -1947,7 +1947,7 @@ vint32m2_t test_vwsub_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_mu( @@ -1956,7 +1956,7 @@ vint32m2_t test_vwsub_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwsub_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_mu( @@ -1965,7 +1965,7 @@ vint32m4_t test_vwsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_mu( @@ -1974,7 +1974,7 @@ vint32m4_t test_vwsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t 
maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwsub_wv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_mu( @@ -1983,7 +1983,7 @@ vint32m4_t test_vwsub_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_mu( @@ -1992,7 +1992,7 @@ vint32m4_t test_vwsub_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwsub_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_mu( @@ -2001,7 +2001,7 @@ vint32m8_t test_vwsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwsub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_mu( @@ -2010,7 +2010,7 @@ vint32m8_t test_vwsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwsub_wv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i32m8_mu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_mu( @@ -2019,7 +2019,7 @@ vint32m8_t test_vwsub_wv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return vwsub_wx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_mu( @@ -2028,7 +2028,7 @@ vint32m8_t test_vwsub_wx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_mu( @@ -2037,7 +2037,7 @@ vint64m1_t test_vwsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_mu( @@ -2046,7 +2046,7 @@ vint64m1_t test_vwsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_wv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_mu( @@ -2055,7 +2055,7 @@ vint64m1_t test_vwsub_wv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t 
op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_mu( @@ -2064,7 +2064,7 @@ vint64m1_t test_vwsub_wx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwsub_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_mu( @@ -2073,7 +2073,7 @@ vint64m2_t test_vwsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_mu( @@ -2082,7 +2082,7 @@ vint64m2_t test_vwsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwsub_wv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_mu( @@ -2091,7 +2091,7 @@ vint64m2_t test_vwsub_wv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_mu( @@ -2100,7 +2100,7 @@ vint64m2_t test_vwsub_wx_i64m2_mu(vbool32_t mask, 
vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwsub_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_mu( @@ -2109,7 +2109,7 @@ vint64m4_t test_vwsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_mu( @@ -2118,7 +2118,7 @@ vint64m4_t test_vwsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwsub_wv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_mu( @@ -2127,7 +2127,7 @@ vint64m4_t test_vwsub_wv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_mu( @@ -2136,7 +2136,7 @@ vint64m4_t test_vwsub_wx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwsub_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vv_i64m8_mu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_mu( @@ -2145,7 +2145,7 @@ vint64m8_t test_vwsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwsub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_mu( @@ -2154,7 +2154,7 @@ vint64m8_t test_vwsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwsub_wv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_mu( @@ -2163,6 +2163,6 @@ vint64m8_t test_vwsub_wv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return vwsub_wx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsub_wx_i64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwsubu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwsubu.c index 1c4b589cbe97..c25fdedf0f8a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwsubu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwsubu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16mf4_tu(maskedoff, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_tu( @@ -21,7 +21,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_tu( @@ -30,7 +30,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_wv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_tu( @@ -39,7 +39,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_tu( @@ -48,7 +48,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_tu( @@ -57,7 +57,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return 
__riscv_vwsubu_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_tu( @@ -66,7 +66,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, u // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_wv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_tu( @@ -75,7 +75,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_tu( @@ -84,7 +84,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_tu( @@ -93,7 +93,7 @@ vuint16m1_t test_vwsubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_tu( @@ -102,7 +102,7 @@ vuint16m1_t test_vwsubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return 
vwsubu_wv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_tu( @@ -111,7 +111,7 @@ vuint16m1_t test_vwsubu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_tu( @@ -120,7 +120,7 @@ vuint16m1_t test_vwsubu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_tu( @@ -129,7 +129,7 @@ vuint16m2_t test_vwsubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_tu( @@ -138,7 +138,7 @@ vuint16m2_t test_vwsubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_wv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_tu( @@ -147,7 +147,7 @@ vuint16m2_t test_vwsubu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t 
op2, size_t vl) { - return vwsubu_wx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_tu( @@ -156,7 +156,7 @@ vuint16m2_t test_vwsubu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_tu( @@ -165,7 +165,7 @@ vuint16m4_t test_vwsubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_tu( @@ -174,7 +174,7 @@ vuint16m4_t test_vwsubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_wv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_tu( @@ -183,7 +183,7 @@ vuint16m4_t test_vwsubu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_tu( @@ -192,7 +192,7 @@ vuint16m4_t test_vwsubu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8_tu(vuint16m8_t maskedoff, 
vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_tu( @@ -201,7 +201,7 @@ vuint16m8_t test_vwsubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_tu( @@ -210,7 +210,7 @@ vuint16m8_t test_vwsubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_wv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_tu( @@ -219,7 +219,7 @@ vuint16m8_t test_vwsubu_wv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_tu( @@ -228,7 +228,7 @@ vuint16m8_t test_vwsubu_wx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_tu( @@ -237,7 +237,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vwsubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_tu( @@ -246,7 +246,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_wv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_tu( @@ -255,7 +255,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_tu( @@ -264,7 +264,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_tu( @@ -273,7 +273,7 @@ vuint32m1_t test_vwsubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_tu( @@ -282,7 +282,7 @@ vuint32m1_t test_vwsubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, 
uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_wv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_tu( @@ -291,7 +291,7 @@ vuint32m1_t test_vwsubu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_tu( @@ -300,7 +300,7 @@ vuint32m1_t test_vwsubu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_tu( @@ -309,7 +309,7 @@ vuint32m2_t test_vwsubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_tu( @@ -318,7 +318,7 @@ vuint32m2_t test_vwsubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_wv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_tu( @@ -327,7 +327,7 @@ vuint32m2_t 
test_vwsubu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_tu( @@ -336,7 +336,7 @@ vuint32m2_t test_vwsubu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_tu( @@ -345,7 +345,7 @@ vuint32m4_t test_vwsubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_tu( @@ -354,7 +354,7 @@ vuint32m4_t test_vwsubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_wv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_tu( @@ -363,7 +363,7 @@ vuint32m4_t test_vwsubu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_tu( 
@@ -372,7 +372,7 @@ vuint32m4_t test_vwsubu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_tu( @@ -381,7 +381,7 @@ vuint32m8_t test_vwsubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_tu( @@ -390,7 +390,7 @@ vuint32m8_t test_vwsubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_wv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_tu( @@ -399,7 +399,7 @@ vuint32m8_t test_vwsubu_wv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tu( @@ -408,7 +408,7 @@ vuint32m8_t test_vwsubu_wx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m1_tu(maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tu( @@ -417,7 +417,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tu( @@ -426,7 +426,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_wv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tu( @@ -435,7 +435,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_tu( @@ -444,7 +444,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_tu( @@ -453,7 +453,7 @@ vuint64m2_t test_vwsubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m2_tu(maskedoff, op1, op2, vl); + return 
__riscv_vwsubu_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_tu( @@ -462,7 +462,7 @@ vuint64m2_t test_vwsubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_wv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_tu( @@ -471,7 +471,7 @@ vuint64m2_t test_vwsubu_wv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_tu( @@ -480,7 +480,7 @@ vuint64m2_t test_vwsubu_wx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_tu( @@ -489,7 +489,7 @@ vuint64m4_t test_vwsubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_tu( @@ -498,7 +498,7 @@ vuint64m4_t test_vwsubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return 
vwsubu_wv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_tu( @@ -507,7 +507,7 @@ vuint64m4_t test_vwsubu_wv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_tu( @@ -516,7 +516,7 @@ vuint64m4_t test_vwsubu_wx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_tu( @@ -525,7 +525,7 @@ vuint64m8_t test_vwsubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_tu( @@ -534,7 +534,7 @@ vuint64m8_t test_vwsubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_wv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_tu( @@ -543,7 +543,7 @@ vuint64m8_t test_vwsubu_wv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, 
uint32_t op2, size_t vl) { - return vwsubu_wx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_tum( @@ -552,7 +552,7 @@ vuint64m8_t test_vwsubu_wx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_tum( @@ -561,7 +561,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_tum( @@ -570,7 +570,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_wv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_tum( @@ -579,7 +579,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_tum( @@ -588,7 +588,7 @@ 
vuint16mf4_t test_vwsubu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_tum( @@ -597,7 +597,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_tum( @@ -606,7 +606,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_wv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_tum( @@ -615,7 +615,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_tum( @@ -624,7 +624,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t 
op2, size_t vl) { - return vwsubu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_tum( @@ -633,7 +633,7 @@ vuint16m1_t test_vwsubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_tum( @@ -642,7 +642,7 @@ vuint16m1_t test_vwsubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_wv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_tum( @@ -651,7 +651,7 @@ vuint16m1_t test_vwsubu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_tum( @@ -660,7 +660,7 @@ vuint16m1_t test_vwsubu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_tum( @@ -669,7 +669,7 @@ vuint16m2_t 
test_vwsubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_tum( @@ -678,7 +678,7 @@ vuint16m2_t test_vwsubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_wv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_tum( @@ -687,7 +687,7 @@ vuint16m2_t test_vwsubu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_tum( @@ -696,7 +696,7 @@ vuint16m2_t test_vwsubu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_tum( @@ -705,7 +705,7 @@ vuint16m4_t test_vwsubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return 
vwsubu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_tum( @@ -714,7 +714,7 @@ vuint16m4_t test_vwsubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_wv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_tum( @@ -723,7 +723,7 @@ vuint16m4_t test_vwsubu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_tum( @@ -732,7 +732,7 @@ vuint16m4_t test_vwsubu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_tum( @@ -741,7 +741,7 @@ vuint16m8_t test_vwsubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_tum( @@ -750,7 +750,7 @@ vuint16m8_t test_vwsubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t 
maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_wv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_tum( @@ -759,7 +759,7 @@ vuint16m8_t test_vwsubu_wv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_tum( @@ -768,7 +768,7 @@ vuint16m8_t test_vwsubu_wx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_tum( @@ -777,7 +777,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_tum( @@ -786,7 +786,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_wv_u32mf2_tum(mask, maskedoff, op1, 
op2, vl); + return __riscv_vwsubu_wv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_tum( @@ -795,7 +795,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_tum( @@ -804,7 +804,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_tum( @@ -813,7 +813,7 @@ vuint32m1_t test_vwsubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_tum( @@ -822,7 +822,7 @@ vuint32m1_t test_vwsubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_wv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_tum( @@ -831,7 +831,7 @@ vuint32m1_t test_vwsubu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_tum( @@ -840,7 +840,7 @@ vuint32m1_t test_vwsubu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_tum( @@ -849,7 +849,7 @@ vuint32m2_t test_vwsubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_tum( @@ -858,7 +858,7 @@ vuint32m2_t test_vwsubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_wv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_tum( @@ -867,7 +867,7 @@ vuint32m2_t test_vwsubu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwsubu_wx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_tum( @@ -876,7 +876,7 @@ vuint32m2_t test_vwsubu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_tum( @@ -885,7 +885,7 @@ vuint32m4_t test_vwsubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_tum( @@ -894,7 +894,7 @@ vuint32m4_t test_vwsubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_wv_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_tum( @@ -903,7 +903,7 @@ vuint32m4_t test_vwsubu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_tum( @@ -912,7 +912,7 @@ vuint32m4_t test_vwsubu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m8_t test_vwsubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_tum( @@ -921,7 +921,7 @@ vuint32m8_t test_vwsubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_tum( @@ -930,7 +930,7 @@ vuint32m8_t test_vwsubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_wv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_tum( @@ -939,7 +939,7 @@ vuint32m8_t test_vwsubu_wv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tum( @@ -948,7 +948,7 @@ vuint32m8_t test_vwsubu_wx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m1_tum(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tum( @@ -957,7 +957,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tum( @@ -966,7 +966,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_wv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tum( @@ -975,7 +975,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_tum( @@ -984,7 +984,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_tum( @@ -993,7 +993,7 @@ vuint64m2_t test_vwsubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2_tum(vbool32_t 
mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_tum( @@ -1002,7 +1002,7 @@ vuint64m2_t test_vwsubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_wv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_tum( @@ -1011,7 +1011,7 @@ vuint64m2_t test_vwsubu_wv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_tum( @@ -1020,7 +1020,7 @@ vuint64m2_t test_vwsubu_wx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_tum( @@ -1029,7 +1029,7 @@ vuint64m4_t test_vwsubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwsubu_wv_u64m4_tum( @@ -1038,7 +1038,7 @@ vuint64m4_t test_vwsubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_wv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_tum( @@ -1047,7 +1047,7 @@ vuint64m4_t test_vwsubu_wv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_tum( @@ -1056,7 +1056,7 @@ vuint64m4_t test_vwsubu_wx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_tum( @@ -1065,7 +1065,7 @@ vuint64m8_t test_vwsubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_tum( @@ -1074,7 +1074,7 @@ vuint64m8_t test_vwsubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wv_u64m8_tum(vbool8_t mask, vuint64m8_t 
maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_wv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_tum( @@ -1083,7 +1083,7 @@ vuint64m8_t test_vwsubu_wv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_tumu( @@ -1092,7 +1092,7 @@ vuint64m8_t test_vwsubu_wx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_tumu( @@ -1101,7 +1101,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_tumu( @@ -1110,7 +1110,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_wv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vwsubu_wx_u16mf4_tumu( @@ -1119,7 +1119,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_tumu( @@ -1128,7 +1128,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_tumu( @@ -1137,7 +1137,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_tumu( @@ -1146,7 +1146,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_wv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_tumu( @@ -1155,7 +1155,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t 
test_vwsubu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_tumu( @@ -1164,7 +1164,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_tumu( @@ -1173,7 +1173,7 @@ vuint16m1_t test_vwsubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_tumu( @@ -1182,7 +1182,7 @@ vuint16m1_t test_vwsubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_wv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_tumu( @@ -1191,7 +1191,7 @@ vuint16m1_t test_vwsubu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m1_tumu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_tumu( @@ -1200,7 +1200,7 @@ vuint16m1_t test_vwsubu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_tumu( @@ -1209,7 +1209,7 @@ vuint16m2_t test_vwsubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_tumu( @@ -1218,7 +1218,7 @@ vuint16m2_t test_vwsubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_wv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_tumu( @@ -1227,7 +1227,7 @@ vuint16m2_t test_vwsubu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_tumu( @@ -1236,7 +1236,7 @@ vuint16m2_t test_vwsubu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vwsubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_tumu( @@ -1245,7 +1245,7 @@ vuint16m4_t test_vwsubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_tumu( @@ -1254,7 +1254,7 @@ vuint16m4_t test_vwsubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_wv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_tumu( @@ -1263,7 +1263,7 @@ vuint16m4_t test_vwsubu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_tumu( @@ -1272,7 +1272,7 @@ vuint16m4_t test_vwsubu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m8_tumu(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_tumu( @@ -1281,7 +1281,7 @@ vuint16m8_t test_vwsubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_tumu( @@ -1290,7 +1290,7 @@ vuint16m8_t test_vwsubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_wv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_tumu( @@ -1299,7 +1299,7 @@ vuint16m8_t test_vwsubu_wv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_tumu( @@ -1308,7 +1308,7 @@ vuint16m8_t test_vwsubu_wx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_tumu( @@ -1317,7 +1317,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vwsubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_tumu( @@ -1326,7 +1326,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_wv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_tumu( @@ -1335,7 +1335,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_tumu( @@ -1344,7 +1344,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_tumu( @@ -1353,7 +1353,7 @@ vuint32m1_t test_vwsubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwsubu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_tumu( @@ -1362,7 +1362,7 @@ vuint32m1_t test_vwsubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_wv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_tumu( @@ -1371,7 +1371,7 @@ vuint32m1_t test_vwsubu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_tumu( @@ -1380,7 +1380,7 @@ vuint32m1_t test_vwsubu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_tumu( @@ -1389,7 +1389,7 @@ vuint32m2_t test_vwsubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_tumu( @@ -1398,7 +1398,7 @@ vuint32m2_t test_vwsubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_wv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_tumu( @@ -1407,7 +1407,7 @@ vuint32m2_t test_vwsubu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_tumu( @@ -1416,7 +1416,7 @@ vuint32m2_t test_vwsubu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_tumu( @@ -1425,7 +1425,7 @@ vuint32m4_t test_vwsubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_tumu( @@ -1434,7 +1434,7 @@ vuint32m4_t test_vwsubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_wv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vwsubu_wv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_tumu( @@ -1443,7 +1443,7 @@ vuint32m4_t test_vwsubu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_tumu( @@ -1452,7 +1452,7 @@ vuint32m4_t test_vwsubu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_tumu( @@ -1461,7 +1461,7 @@ vuint32m8_t test_vwsubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_tumu( @@ -1470,7 +1470,7 @@ vuint32m8_t test_vwsubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_wv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_tumu( @@ -1479,7 +1479,7 @@ vuint32m8_t test_vwsubu_wv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_tumu( @@ -1488,7 +1488,7 @@ vuint32m8_t test_vwsubu_wx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_tumu( @@ -1497,7 +1497,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_tumu( @@ -1506,7 +1506,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_wv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_tumu( @@ -1515,7 +1515,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vwsubu_wx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_tumu( @@ -1524,7 +1524,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_tumu( @@ -1533,7 +1533,7 @@ vuint64m2_t test_vwsubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_tumu( @@ -1542,7 +1542,7 @@ vuint64m2_t test_vwsubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_wv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_tumu( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vwsubu_wv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_tumu( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vwsubu_wx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_tumu( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vwsubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_tumu( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vwsubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_wv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_tumu( @@ -1587,7 +1587,7 @@ vuint64m4_t test_vwsubu_wv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_tumu( @@ -1596,7 +1596,7 @@ vuint64m4_t test_vwsubu_wx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); 
+ return __riscv_vwsubu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_tumu( @@ -1605,7 +1605,7 @@ vuint64m8_t test_vwsubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_tumu( @@ -1614,7 +1614,7 @@ vuint64m8_t test_vwsubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_wv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_tumu( @@ -1623,7 +1623,7 @@ vuint64m8_t test_vwsubu_wv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_mu( @@ -1632,7 +1632,7 @@ vuint64m8_t test_vwsubu_wx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_mu( @@ -1641,7 +1641,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_mu( @@ -1650,7 +1650,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_wv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_mu( @@ -1659,7 +1659,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_mu( @@ -1668,7 +1668,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_mu( @@ -1677,7 +1677,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vwsubu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_mu( @@ -1686,7 +1686,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_wv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_mu( @@ -1695,7 +1695,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_mu( @@ -1704,7 +1704,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_mu( @@ -1713,7 +1713,7 @@ vuint16m1_t test_vwsubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_mu( @@ -1722,7 +1722,7 @@ vuint16m1_t test_vwsubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_wv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_mu( @@ -1731,7 +1731,7 @@ vuint16m1_t test_vwsubu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_mu( @@ -1740,7 +1740,7 @@ vuint16m1_t test_vwsubu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_mu( @@ -1749,7 +1749,7 @@ vuint16m2_t test_vwsubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_mu( @@ -1758,7 +1758,7 @@ vuint16m2_t test_vwsubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_wv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m2_mu(mask, maskedoff, op1, op2, vl); 
} // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_mu( @@ -1767,7 +1767,7 @@ vuint16m2_t test_vwsubu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_mu( @@ -1776,7 +1776,7 @@ vuint16m2_t test_vwsubu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_mu( @@ -1785,7 +1785,7 @@ vuint16m4_t test_vwsubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_mu( @@ -1794,7 +1794,7 @@ vuint16m4_t test_vwsubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_wv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_mu( @@ -1803,7 +1803,7 @@ vuint16m4_t test_vwsubu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, 
vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_mu( @@ -1812,7 +1812,7 @@ vuint16m4_t test_vwsubu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_mu( @@ -1821,7 +1821,7 @@ vuint16m8_t test_vwsubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_mu( @@ -1830,7 +1830,7 @@ vuint16m8_t test_vwsubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_wv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_mu( @@ -1839,7 +1839,7 @@ vuint16m8_t test_vwsubu_wv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_mu( @@ -1848,7 +1848,7 @@ vuint16m8_t 
test_vwsubu_wx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_mu( @@ -1857,7 +1857,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_mu( @@ -1866,7 +1866,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_wv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_mu( @@ -1875,7 +1875,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_mu( @@ -1884,7 +1884,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t 
vl) { - return vwsubu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_mu( @@ -1893,7 +1893,7 @@ vuint32m1_t test_vwsubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_mu( @@ -1902,7 +1902,7 @@ vuint32m1_t test_vwsubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_wv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_mu( @@ -1911,7 +1911,7 @@ vuint32m1_t test_vwsubu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_mu( @@ -1920,7 +1920,7 @@ vuint32m1_t test_vwsubu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_mu( @@ -1929,7 +1929,7 @@ vuint32m2_t test_vwsubu_vv_u32m2_mu(vbool16_t 
mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_mu( @@ -1938,7 +1938,7 @@ vuint32m2_t test_vwsubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_wv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_mu( @@ -1947,7 +1947,7 @@ vuint32m2_t test_vwsubu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_mu( @@ -1956,7 +1956,7 @@ vuint32m2_t test_vwsubu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_mu( @@ -1965,7 +1965,7 @@ vuint32m4_t test_vwsubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vwsubu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_mu( @@ -1974,7 +1974,7 @@ vuint32m4_t test_vwsubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_wv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_mu( @@ -1983,7 +1983,7 @@ vuint32m4_t test_vwsubu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_mu( @@ -1992,7 +1992,7 @@ vuint32m4_t test_vwsubu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_mu( @@ -2001,7 +2001,7 @@ vuint32m8_t test_vwsubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_mu( @@ -2010,7 +2010,7 @@ vuint32m8_t test_vwsubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m8_t test_vwsubu_wv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_wv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_mu( @@ -2019,7 +2019,7 @@ vuint32m8_t test_vwsubu_wv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_mu( @@ -2028,7 +2028,7 @@ vuint32m8_t test_vwsubu_wx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_mu( @@ -2037,7 +2037,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_mu( @@ -2046,7 +2046,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_wv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m1_mu(mask, maskedoff, op1, op2, vl); } 
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_mu( @@ -2055,7 +2055,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_mu( @@ -2064,7 +2064,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_mu( @@ -2073,7 +2073,7 @@ vuint64m2_t test_vwsubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_mu( @@ -2082,7 +2082,7 @@ vuint64m2_t test_vwsubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_wv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_mu( @@ -2091,7 +2091,7 @@ vuint64m2_t test_vwsubu_wv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2_mu(vbool32_t mask, vuint64m2_t 
maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_mu( @@ -2100,7 +2100,7 @@ vuint64m2_t test_vwsubu_wx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_mu( @@ -2109,7 +2109,7 @@ vuint64m4_t test_vwsubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_mu( @@ -2118,7 +2118,7 @@ vuint64m4_t test_vwsubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_wv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_mu( @@ -2127,7 +2127,7 @@ vuint64m4_t test_vwsubu_wv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_mu( @@ -2136,7 +2136,7 @@ 
vuint64m4_t test_vwsubu_wx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_mu( @@ -2145,7 +2145,7 @@ vuint64m8_t test_vwsubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_mu( @@ -2154,7 +2154,7 @@ vuint64m8_t test_vwsubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_wv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_mu( @@ -2163,6 +2163,6 @@ vuint64m8_t test_vwsubu_wv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vwsubu_wx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vxor.c index f3f97a9f59c7..c3373a4bc618 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vxor.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vxor.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vxor_vv_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_tu( @@ -21,7 +21,7 @@ vint8mf8_t test_vxor_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_tu( @@ -30,7 +30,7 @@ vint8mf8_t test_vxor_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vxor_vv_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_tu( @@ -39,7 +39,7 @@ vint8mf4_t test_vxor_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_tu( @@ -48,7 +48,7 @@ vint8mf4_t test_vxor_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vxor_vv_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_tu( @@ -57,7 +57,7 @@ vint8mf2_t 
test_vxor_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m1_tu( @@ -66,7 +66,7 @@ vint8mf2_t test_vxor_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vxor_vv_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m1_tu( @@ -75,7 +75,7 @@ vint8m1_t test_vxor_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m2_tu( @@ -84,7 +84,7 @@ vint8m1_t test_vxor_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vxor_vv_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m2_tu( @@ -93,7 +93,7 @@ vint8m2_t test_vxor_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m4_tu( @@ -102,7 +102,7 @@ vint8m2_t test_vxor_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, s 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vxor_vv_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m4_tu( @@ -111,7 +111,7 @@ vint8m4_t test_vxor_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m8_tu( @@ -120,7 +120,7 @@ vint8m4_t test_vxor_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vxor_vv_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m8_tu( @@ -129,7 +129,7 @@ vint8m8_t test_vxor_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_tu( @@ -138,7 +138,7 @@ vint8m8_t test_vxor_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vxor_vv_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_tu( @@ -147,7 +147,7 @@ vint16mf4_t test_vxor_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vxor_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_tu( @@ -156,7 +156,7 @@ vint16mf4_t test_vxor_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vxor_vv_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_tu( @@ -165,7 +165,7 @@ vint16mf2_t test_vxor_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m1_tu( @@ -174,7 +174,7 @@ vint16mf2_t test_vxor_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vxor_vv_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m1_tu( @@ -183,7 +183,7 @@ vint16m1_t test_vxor_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m2_tu( @@ -192,7 +192,7 @@ vint16m1_t test_vxor_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vxor_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vxor_vv_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m2_tu( @@ -201,7 +201,7 @@ vint16m2_t test_vxor_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m4_tu( @@ -210,7 +210,7 @@ vint16m2_t test_vxor_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vxor_vv_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m4_tu( @@ -219,7 +219,7 @@ vint16m4_t test_vxor_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m8_tu( @@ -228,7 +228,7 @@ vint16m4_t test_vxor_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vxor_vv_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m8_tu( @@ -237,7 +237,7 @@ vint16m8_t test_vxor_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vxor_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tu( @@ -246,7 +246,7 @@ vint16m8_t test_vxor_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vxor_vv_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tu( @@ -255,7 +255,7 @@ vint32mf2_t test_vxor_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m1_tu( @@ -264,7 +264,7 @@ vint32mf2_t test_vxor_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vxor_vv_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m1_tu( @@ -273,7 +273,7 @@ vint32m1_t test_vxor_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m2_tu( @@ -282,7 +282,7 @@ vint32m1_t test_vxor_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vxor_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vxor_vv_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m2_tu( @@ -291,7 +291,7 @@ vint32m2_t test_vxor_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m4_tu( @@ -300,7 +300,7 @@ vint32m2_t test_vxor_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vxor_vv_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m4_tu( @@ -309,7 +309,7 @@ vint32m4_t test_vxor_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m8_tu( @@ -318,7 +318,7 @@ vint32m4_t test_vxor_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vxor_vv_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m8_tu( @@ -327,7 +327,7 @@ vint32m8_t test_vxor_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vxor_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m1_tu( @@ -336,7 +336,7 @@ vint32m8_t test_vxor_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vxor_vv_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m1_tu( @@ -345,7 +345,7 @@ vint64m1_t test_vxor_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m2_tu( @@ -354,7 +354,7 @@ vint64m1_t test_vxor_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vxor_vv_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m2_tu( @@ -363,7 +363,7 @@ vint64m2_t test_vxor_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m4_tu( @@ -372,7 +372,7 @@ vint64m2_t test_vxor_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vxor_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vxor_vv_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m4_tu( @@ -381,7 +381,7 @@ vint64m4_t test_vxor_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m8_tu( @@ -390,7 +390,7 @@ vint64m4_t test_vxor_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vxor_vv_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m8_tu( @@ -399,7 +399,7 @@ vint64m8_t test_vxor_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_tu( @@ -408,7 +408,7 @@ vint64m8_t test_vxor_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vxor_vv_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_tu( @@ -417,7 +417,7 @@ vuint8mf8_t test_vxor_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vxor_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8mf8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_tu( @@ -426,7 +426,7 @@ vuint8mf8_t test_vxor_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vxor_vv_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_tu( @@ -435,7 +435,7 @@ vuint8mf4_t test_vxor_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_tu( @@ -444,7 +444,7 @@ vuint8mf4_t test_vxor_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vxor_vv_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_tu( @@ -453,7 +453,7 @@ vuint8mf2_t test_vxor_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m1_tu( @@ -462,7 +462,7 @@ vuint8mf2_t test_vxor_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vxor_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vxor_vv_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m1_tu( @@ -471,7 +471,7 @@ vuint8m1_t test_vxor_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m2_tu( @@ -480,7 +480,7 @@ vuint8m1_t test_vxor_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vxor_vv_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m2_tu( @@ -489,7 +489,7 @@ vuint8m2_t test_vxor_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m4_tu( @@ -498,7 +498,7 @@ vuint8m2_t test_vxor_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vxor_vv_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m4_tu( @@ -507,7 +507,7 @@ vuint8m4_t test_vxor_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4_tu(vuint8m4_t maskedoff, 
vuint8m4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m8_tu( @@ -516,7 +516,7 @@ vuint8m4_t test_vxor_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vxor_vv_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m8_tu( @@ -525,7 +525,7 @@ vuint8m8_t test_vxor_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_tu( @@ -534,7 +534,7 @@ vuint8m8_t test_vxor_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vxor_vv_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_tu( @@ -543,7 +543,7 @@ vuint16mf4_t test_vxor_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16mf4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_tu( @@ -552,7 +552,7 @@ vuint16mf4_t test_vxor_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2_tu(vuint16mf2_t maskedoff, 
vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vxor_vv_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_tu( @@ -561,7 +561,7 @@ vuint16mf2_t test_vxor_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m1_tu( @@ -570,7 +570,7 @@ vuint16mf2_t test_vxor_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vxor_vv_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m1_tu( @@ -579,7 +579,7 @@ vuint16m1_t test_vxor_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m2_tu( @@ -588,7 +588,7 @@ vuint16m1_t test_vxor_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vxor_vv_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m2_tu( @@ -597,7 +597,7 @@ vuint16m2_t test_vxor_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2_tu(vuint16m2_t 
maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m4_tu( @@ -606,7 +606,7 @@ vuint16m2_t test_vxor_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vxor_vv_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m4_tu( @@ -615,7 +615,7 @@ vuint16m4_t test_vxor_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m8_tu( @@ -624,7 +624,7 @@ vuint16m4_t test_vxor_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vxor_vv_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m8_tu( @@ -633,7 +633,7 @@ vuint16m8_t test_vxor_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tu( @@ -642,7 +642,7 @@ vuint16m8_t test_vxor_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2_tu(vuint32mf2_t 
maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vxor_vv_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tu( @@ -651,7 +651,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32mf2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32mf2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m1_tu( @@ -660,7 +660,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, ui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vxor_vv_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m1_tu( @@ -669,7 +669,7 @@ vuint32m1_t test_vxor_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m2_tu( @@ -678,7 +678,7 @@ vuint32m1_t test_vxor_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vxor_vv_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m2_tu( @@ -687,7 +687,7 @@ vuint32m2_t test_vxor_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vxor_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m4_tu( @@ -696,7 +696,7 @@ vuint32m2_t test_vxor_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vxor_vv_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m4_tu( @@ -705,7 +705,7 @@ vuint32m4_t test_vxor_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m8_tu( @@ -714,7 +714,7 @@ vuint32m4_t test_vxor_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vxor_vv_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m8_tu( @@ -723,7 +723,7 @@ vuint32m8_t test_vxor_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m1_tu( @@ -732,7 +732,7 @@ vuint32m8_t test_vxor_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vxor_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vxor_vv_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m1_tu( @@ -741,7 +741,7 @@ vuint64m1_t test_vxor_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m1_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m1_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m2_tu( @@ -750,7 +750,7 @@ vuint64m1_t test_vxor_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vxor_vv_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m2_tu( @@ -759,7 +759,7 @@ vuint64m2_t test_vxor_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m2_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m2_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m4_tu( @@ -768,7 +768,7 @@ vuint64m2_t test_vxor_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vxor_vv_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m4_tu( @@ -777,7 +777,7 @@ vuint64m4_t test_vxor_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vxor_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m4_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m4_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m8_tu( @@ -786,7 +786,7 @@ vuint64m4_t test_vxor_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vxor_vv_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m8_tu( @@ -795,7 +795,7 @@ vuint64m8_t test_vxor_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m8_tu(maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m8_tu(maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_tum( @@ -804,7 +804,7 @@ vuint64m8_t test_vxor_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vxor_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_tum( @@ -813,7 +813,7 @@ vint8mf8_t test_vxor_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_tum( @@ -822,7 +822,7 @@ vint8mf8_t test_vxor_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vxor_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_tum( @@ -831,7 +831,7 @@ vint8mf4_t test_vxor_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_tum( @@ -840,7 +840,7 @@ vint8mf4_t test_vxor_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vxor_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_tum( @@ -849,7 +849,7 @@ vint8mf2_t test_vxor_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m1_tum( @@ -858,7 +858,7 @@ vint8mf2_t test_vxor_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vxor_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vxor_vx_i8m1_tum( @@ -867,7 +867,7 @@ vint8m1_t test_vxor_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m2_tum( @@ -876,7 +876,7 @@ vint8m1_t test_vxor_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vxor_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m2_tum( @@ -885,7 +885,7 @@ vint8m2_t test_vxor_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m4_tum( @@ -894,7 +894,7 @@ vint8m2_t test_vxor_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vxor_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m4_tum( @@ -903,7 +903,7 @@ vint8m4_t test_vxor_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); + 
return __riscv_vxor_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m8_tum( @@ -912,7 +912,7 @@ vint8m4_t test_vxor_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vxor_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m8_tum( @@ -921,7 +921,7 @@ vint8m8_t test_vxor_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_tum( @@ -930,7 +930,7 @@ vint8m8_t test_vxor_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vxor_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_tum( @@ -939,7 +939,7 @@ vint16mf4_t test_vxor_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_tum( @@ -948,7 +948,7 @@ vint16mf4_t test_vxor_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2_tum(vbool32_t 
mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vxor_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_tum( @@ -957,7 +957,7 @@ vint16mf2_t test_vxor_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m1_tum( @@ -966,7 +966,7 @@ vint16mf2_t test_vxor_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vxor_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m1_tum( @@ -975,7 +975,7 @@ vint16m1_t test_vxor_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m2_tum( @@ -984,7 +984,7 @@ vint16m1_t test_vxor_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vxor_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m2_tum( @@ -993,7 +993,7 @@ vint16m2_t 
test_vxor_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m4_tum( @@ -1002,7 +1002,7 @@ vint16m2_t test_vxor_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vxor_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m4_tum( @@ -1011,7 +1011,7 @@ vint16m4_t test_vxor_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m8_tum( @@ -1020,7 +1020,7 @@ vint16m4_t test_vxor_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vxor_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m8_tum( @@ -1029,7 +1029,7 @@ vint16m8_t test_vxor_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); + return 
__riscv_vxor_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tum( @@ -1038,7 +1038,7 @@ vint16m8_t test_vxor_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vxor_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tum( @@ -1047,7 +1047,7 @@ vint32mf2_t test_vxor_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m1_tum( @@ -1056,7 +1056,7 @@ vint32mf2_t test_vxor_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vxor_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m1_tum( @@ -1065,7 +1065,7 @@ vint32m1_t test_vxor_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m2_tum( @@ -1074,7 +1074,7 @@ vint32m1_t test_vxor_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vxor_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vxor_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m2_tum( @@ -1083,7 +1083,7 @@ vint32m2_t test_vxor_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m4_tum( @@ -1092,7 +1092,7 @@ vint32m2_t test_vxor_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vxor_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m4_tum( @@ -1101,7 +1101,7 @@ vint32m4_t test_vxor_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m8_tum( @@ -1110,7 +1110,7 @@ vint32m4_t test_vxor_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vxor_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m8_tum( @@ 
-1119,7 +1119,7 @@ vint32m8_t test_vxor_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m1_tum( @@ -1128,7 +1128,7 @@ vint32m8_t test_vxor_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vxor_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m1_tum( @@ -1137,7 +1137,7 @@ vint64m1_t test_vxor_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m2_tum( @@ -1146,7 +1146,7 @@ vint64m1_t test_vxor_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vxor_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m2_tum( @@ -1155,7 +1155,7 @@ vint64m2_t test_vxor_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m2_tum(mask, 
maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m4_tum( @@ -1164,7 +1164,7 @@ vint64m2_t test_vxor_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vxor_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m4_tum( @@ -1173,7 +1173,7 @@ vint64m4_t test_vxor_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m8_tum( @@ -1182,7 +1182,7 @@ vint64m4_t test_vxor_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vxor_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m8_tum( @@ -1191,7 +1191,7 @@ vint64m8_t test_vxor_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_tum( @@ -1200,7 +1200,7 @@ vint64m8_t test_vxor_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8mf8_t test_vxor_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vxor_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_tum( @@ -1209,7 +1209,7 @@ vuint8mf8_t test_vxor_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_tum( @@ -1218,7 +1218,7 @@ vuint8mf8_t test_vxor_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vxor_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_tum( @@ -1227,7 +1227,7 @@ vuint8mf4_t test_vxor_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_tum( @@ -1236,7 +1236,7 @@ vuint8mf4_t test_vxor_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vxor_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vxor_vx_u8mf2_tum( @@ -1245,7 +1245,7 @@ vuint8mf2_t test_vxor_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m1_tum( @@ -1254,7 +1254,7 @@ vuint8mf2_t test_vxor_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vxor_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m1_tum( @@ -1263,7 +1263,7 @@ vuint8m1_t test_vxor_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m2_tum( @@ -1272,7 +1272,7 @@ vuint8m1_t test_vxor_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vxor_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m2_tum( @@ -1281,7 +1281,7 @@ vuint8m2_t test_vxor_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return 
vxor_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m4_tum( @@ -1290,7 +1290,7 @@ vuint8m2_t test_vxor_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vxor_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m4_tum( @@ -1299,7 +1299,7 @@ vuint8m4_t test_vxor_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m8_tum( @@ -1308,7 +1308,7 @@ vuint8m4_t test_vxor_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vxor_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m8_tum( @@ -1317,7 +1317,7 @@ vuint8m8_t test_vxor_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_tum( @@ -1326,7 +1326,7 @@ vuint8m8_t test_vxor_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf4_t test_vxor_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vxor_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_tum( @@ -1335,7 +1335,7 @@ vuint16mf4_t test_vxor_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_tum( @@ -1344,7 +1344,7 @@ vuint16mf4_t test_vxor_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vxor_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_tum( @@ -1353,7 +1353,7 @@ vuint16mf2_t test_vxor_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m1_tum( @@ -1362,7 +1362,7 @@ vuint16mf2_t test_vxor_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vxor_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m1_tum(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m1_tum( @@ -1371,7 +1371,7 @@ vuint16m1_t test_vxor_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m2_tum( @@ -1380,7 +1380,7 @@ vuint16m1_t test_vxor_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vxor_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m2_tum( @@ -1389,7 +1389,7 @@ vuint16m2_t test_vxor_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m4_tum( @@ -1398,7 +1398,7 @@ vuint16m2_t test_vxor_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vxor_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m4_tum( @@ -1407,7 +1407,7 @@ vuint16m4_t test_vxor_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, 
vuint16m4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m8_tum( @@ -1416,7 +1416,7 @@ vuint16m4_t test_vxor_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vxor_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m8_tum( @@ -1425,7 +1425,7 @@ vuint16m8_t test_vxor_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tum( @@ -1434,7 +1434,7 @@ vuint16m8_t test_vxor_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vxor_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tum( @@ -1443,7 +1443,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m1_tum( @@ -1452,7 +1452,7 @@ vuint32mf2_t 
test_vxor_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vxor_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m1_tum( @@ -1461,7 +1461,7 @@ vuint32m1_t test_vxor_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m2_tum( @@ -1470,7 +1470,7 @@ vuint32m1_t test_vxor_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vxor_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m2_tum( @@ -1479,7 +1479,7 @@ vuint32m2_t test_vxor_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m4_tum( @@ -1488,7 +1488,7 @@ vuint32m2_t test_vxor_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vxor_vv_u32m4_tum(mask, maskedoff, 
op1, op2, vl); + return __riscv_vxor_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m4_tum( @@ -1497,7 +1497,7 @@ vuint32m4_t test_vxor_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m8_tum( @@ -1506,7 +1506,7 @@ vuint32m4_t test_vxor_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vxor_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m8_tum( @@ -1515,7 +1515,7 @@ vuint32m8_t test_vxor_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m1_tum( @@ -1524,7 +1524,7 @@ vuint32m8_t test_vxor_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vxor_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m1_tum( @@ -1533,7 +1533,7 @@ vuint64m1_t test_vxor_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m1_t test_vxor_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m2_tum( @@ -1542,7 +1542,7 @@ vuint64m1_t test_vxor_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vxor_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m2_tum( @@ -1551,7 +1551,7 @@ vuint64m2_t test_vxor_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m4_tum( @@ -1560,7 +1560,7 @@ vuint64m2_t test_vxor_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vxor_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m4_tum( @@ -1569,7 +1569,7 @@ vuint64m4_t test_vxor_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vxor_vv_u64m8_tum( @@ -1578,7 +1578,7 @@ vuint64m4_t test_vxor_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vxor_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m8_tum( @@ -1587,7 +1587,7 @@ vuint64m8_t test_vxor_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_tumu( @@ -1596,7 +1596,7 @@ vuint64m8_t test_vxor_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vxor_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_tumu( @@ -1605,7 +1605,7 @@ vint8mf8_t test_vxor_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_tumu( @@ -1614,7 +1614,7 @@ vint8mf8_t test_vxor_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, 
size_t vl) { - return vxor_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_tumu( @@ -1623,7 +1623,7 @@ vint8mf4_t test_vxor_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_tumu( @@ -1632,7 +1632,7 @@ vint8mf4_t test_vxor_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vxor_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_tumu( @@ -1641,7 +1641,7 @@ vint8mf2_t test_vxor_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m1_tumu( @@ -1650,7 +1650,7 @@ vint8mf2_t test_vxor_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vxor_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m1_tumu( @@ -1659,7 +1659,7 @@ vint8m1_t test_vxor_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, 
vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m2_tumu( @@ -1668,7 +1668,7 @@ vint8m1_t test_vxor_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vxor_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m2_tumu( @@ -1677,7 +1677,7 @@ vint8m2_t test_vxor_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m4_tumu( @@ -1686,7 +1686,7 @@ vint8m2_t test_vxor_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vxor_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m4_tumu( @@ -1695,7 +1695,7 @@ vint8m4_t test_vxor_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vxor_vv_i8m8_tumu( @@ -1704,7 +1704,7 @@ vint8m4_t test_vxor_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vxor_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m8_tumu( @@ -1713,7 +1713,7 @@ vint8m8_t test_vxor_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_tumu( @@ -1722,7 +1722,7 @@ vint8m8_t test_vxor_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vxor_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_tumu( @@ -1731,7 +1731,7 @@ vint16mf4_t test_vxor_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_tumu( @@ -1740,7 +1740,7 @@ vint16mf4_t test_vxor_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t 
op2, size_t vl) { - return vxor_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_tumu( @@ -1749,7 +1749,7 @@ vint16mf2_t test_vxor_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m1_tumu( @@ -1758,7 +1758,7 @@ vint16mf2_t test_vxor_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vxor_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m1_tumu( @@ -1767,7 +1767,7 @@ vint16m1_t test_vxor_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m2_tumu( @@ -1776,7 +1776,7 @@ vint16m1_t test_vxor_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vxor_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m2_tumu( @@ -1785,7 +1785,7 @@ vint16m2_t test_vxor_vv_i16m2_tumu(vbool8_t 
mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m4_tumu( @@ -1794,7 +1794,7 @@ vint16m2_t test_vxor_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vxor_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m4_tumu( @@ -1803,7 +1803,7 @@ vint16m4_t test_vxor_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m8_tumu( @@ -1812,7 +1812,7 @@ vint16m4_t test_vxor_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vxor_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m8_tumu( @@ -1821,7 +1821,7 @@ vint16m8_t test_vxor_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vxor_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_tumu( @@ -1830,7 +1830,7 @@ vint16m8_t test_vxor_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vxor_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_tumu( @@ -1839,7 +1839,7 @@ vint32mf2_t test_vxor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m1_tumu( @@ -1848,7 +1848,7 @@ vint32mf2_t test_vxor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vxor_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m1_tumu( @@ -1857,7 +1857,7 @@ vint32m1_t test_vxor_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m2_tumu( @@ -1866,7 +1866,7 @@ vint32m1_t test_vxor_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m2_t test_vxor_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vxor_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m2_tumu( @@ -1875,7 +1875,7 @@ vint32m2_t test_vxor_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m4_tumu( @@ -1884,7 +1884,7 @@ vint32m2_t test_vxor_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vxor_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m4_tumu( @@ -1893,7 +1893,7 @@ vint32m4_t test_vxor_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m8_tumu( @@ -1902,7 +1902,7 @@ vint32m4_t test_vxor_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vxor_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vxor_vx_i32m8_tumu( @@ -1911,7 +1911,7 @@ vint32m8_t test_vxor_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m1_tumu( @@ -1920,7 +1920,7 @@ vint32m8_t test_vxor_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vxor_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m1_tumu( @@ -1929,7 +1929,7 @@ vint64m1_t test_vxor_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m2_tumu( @@ -1938,7 +1938,7 @@ vint64m1_t test_vxor_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vxor_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m2_tumu( @@ -1947,7 +1947,7 @@ vint64m2_t test_vxor_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, 
size_t vl) { - return vxor_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m4_tumu( @@ -1956,7 +1956,7 @@ vint64m2_t test_vxor_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vxor_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m4_tumu( @@ -1965,7 +1965,7 @@ vint64m4_t test_vxor_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m8_tumu( @@ -1974,7 +1974,7 @@ vint64m4_t test_vxor_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vxor_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m8_tumu( @@ -1983,7 +1983,7 @@ vint64m8_t test_vxor_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_tumu( @@ -1992,7 +1992,7 @@ vint64m8_t test_vxor_vx_i64m8_tumu(vbool8_t mask, 
vint64m8_t maskedoff, vint64m8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vxor_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_tumu( @@ -2001,7 +2001,7 @@ vuint8mf8_t test_vxor_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_tumu( @@ -2010,7 +2010,7 @@ vuint8mf8_t test_vxor_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vxor_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_tumu( @@ -2019,7 +2019,7 @@ vuint8mf4_t test_vxor_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_tumu( @@ -2028,7 +2028,7 @@ vuint8mf4_t test_vxor_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vxor_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + 
return __riscv_vxor_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_tumu( @@ -2037,7 +2037,7 @@ vuint8mf2_t test_vxor_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m1_tumu( @@ -2046,7 +2046,7 @@ vuint8mf2_t test_vxor_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vxor_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m1_tumu( @@ -2055,7 +2055,7 @@ vuint8m1_t test_vxor_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m2_tumu( @@ -2064,7 +2064,7 @@ vuint8m1_t test_vxor_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vxor_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m2_tumu( @@ -2073,7 +2073,7 @@ vuint8m2_t test_vxor_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vxor_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m4_tumu( @@ -2082,7 +2082,7 @@ vuint8m2_t test_vxor_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vxor_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m4_tumu( @@ -2091,7 +2091,7 @@ vuint8m4_t test_vxor_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m8_tumu( @@ -2100,7 +2100,7 @@ vuint8m4_t test_vxor_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vxor_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m8_tumu( @@ -2109,7 +2109,7 @@ vuint8m8_t test_vxor_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_tumu( @@ -2118,7 
+2118,7 @@ vuint8m8_t test_vxor_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vxor_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_tumu( @@ -2127,7 +2127,7 @@ vuint16mf4_t test_vxor_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_tumu( @@ -2136,7 +2136,7 @@ vuint16mf4_t test_vxor_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vxor_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_tumu( @@ -2145,7 +2145,7 @@ vuint16mf2_t test_vxor_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m1_tumu( @@ -2154,7 +2154,7 @@ vuint16mf2_t test_vxor_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, 
vuint16m1_t op2, size_t vl) { - return vxor_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m1_tumu( @@ -2163,7 +2163,7 @@ vuint16m1_t test_vxor_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m2_tumu( @@ -2172,7 +2172,7 @@ vuint16m1_t test_vxor_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vxor_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m2_tumu( @@ -2181,7 +2181,7 @@ vuint16m2_t test_vxor_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m4_tumu( @@ -2190,7 +2190,7 @@ vuint16m2_t test_vxor_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vxor_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m4_tumu( @@ -2199,7 +2199,7 @@ vuint16m4_t 
test_vxor_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m8_tumu( @@ -2208,7 +2208,7 @@ vuint16m4_t test_vxor_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vxor_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m8_tumu( @@ -2217,7 +2217,7 @@ vuint16m8_t test_vxor_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_tumu( @@ -2226,7 +2226,7 @@ vuint16m8_t test_vxor_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vxor_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_tumu( @@ -2235,7 +2235,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return 
vxor_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m1_tumu( @@ -2244,7 +2244,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vxor_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m1_tumu( @@ -2253,7 +2253,7 @@ vuint32m1_t test_vxor_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m2_tumu( @@ -2262,7 +2262,7 @@ vuint32m1_t test_vxor_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vxor_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m2_tumu( @@ -2271,7 +2271,7 @@ vuint32m2_t test_vxor_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m4_tumu( @@ -2280,7 +2280,7 @@ vuint32m2_t test_vxor_vx_u32m2_tumu(vbool16_t mask, 
vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vxor_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m4_tumu( @@ -2289,7 +2289,7 @@ vuint32m4_t test_vxor_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m8_tumu( @@ -2298,7 +2298,7 @@ vuint32m4_t test_vxor_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vxor_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m8_tumu( @@ -2307,7 +2307,7 @@ vuint32m8_t test_vxor_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m1_tumu( @@ -2316,7 +2316,7 @@ vuint32m8_t test_vxor_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint3 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vxor_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return 
__riscv_vxor_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m1_tumu( @@ -2325,7 +2325,7 @@ vuint64m1_t test_vxor_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m2_tumu( @@ -2334,7 +2334,7 @@ vuint64m1_t test_vxor_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vxor_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m2_tumu( @@ -2343,7 +2343,7 @@ vuint64m2_t test_vxor_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m4_tumu( @@ -2352,7 +2352,7 @@ vuint64m2_t test_vxor_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vxor_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m4_tumu( @@ -2361,7 +2361,7 @@ vuint64m4_t test_vxor_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m4_t test_vxor_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m8_tumu( @@ -2370,7 +2370,7 @@ vuint64m4_t test_vxor_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vxor_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m8_tumu( @@ -2379,7 +2379,7 @@ vuint64m8_t test_vxor_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_mu( @@ -2388,7 +2388,7 @@ vuint64m8_t test_vxor_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint6 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vxor_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_mu( @@ -2397,7 +2397,7 @@ vint8mf8_t test_vxor_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vxor_vv_i8mf4_mu( @@ -2406,7 +2406,7 @@ vint8mf8_t test_vxor_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vxor_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_mu( @@ -2415,7 +2415,7 @@ vint8mf4_t test_vxor_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_mu( @@ -2424,7 +2424,7 @@ vint8mf4_t test_vxor_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vxor_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_mu( @@ -2433,7 +2433,7 @@ vint8mf2_t test_vxor_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m1_mu( @@ -2442,7 +2442,7 @@ vint8mf2_t test_vxor_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vxor_vv_i8m1_mu(mask, 
maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m1_mu( @@ -2451,7 +2451,7 @@ vint8m1_t test_vxor_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m2_mu( @@ -2460,7 +2460,7 @@ vint8m1_t test_vxor_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vxor_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m2_mu( @@ -2469,7 +2469,7 @@ vint8m2_t test_vxor_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m4_mu( @@ -2478,7 +2478,7 @@ vint8m2_t test_vxor_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vxor_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m4_mu( @@ -2487,7 +2487,7 @@ vint8m4_t test_vxor_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4_mu(vbool2_t mask, vint8m4_t 
maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m8_mu( @@ -2496,7 +2496,7 @@ vint8m4_t test_vxor_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vxor_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m8_mu( @@ -2505,7 +2505,7 @@ vint8m8_t test_vxor_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vxor_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_mu( @@ -2514,7 +2514,7 @@ vint8m8_t test_vxor_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vxor_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_mu( @@ -2523,7 +2523,7 @@ vint16mf4_t test_vxor_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_mu( @@ -2532,7 +2532,7 @@ vint16mf4_t test_vxor_vx_i16mf4_mu(vbool64_t mask, 
vint16mf4_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vxor_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_mu( @@ -2541,7 +2541,7 @@ vint16mf2_t test_vxor_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m1_mu( @@ -2550,7 +2550,7 @@ vint16mf2_t test_vxor_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vxor_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m1_mu( @@ -2559,7 +2559,7 @@ vint16m1_t test_vxor_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m2_mu( @@ -2568,7 +2568,7 @@ vint16m1_t test_vxor_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vxor_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m2_mu(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m2_mu( @@ -2577,7 +2577,7 @@ vint16m2_t test_vxor_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m4_mu( @@ -2586,7 +2586,7 @@ vint16m2_t test_vxor_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vxor_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m4_mu( @@ -2595,7 +2595,7 @@ vint16m4_t test_vxor_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vxor_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m8_mu( @@ -2604,7 +2604,7 @@ vint16m4_t test_vxor_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vxor_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m8_mu( @@ -2613,7 +2613,7 @@ vint16m8_t test_vxor_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t 
op2, size_t vl) { - return vxor_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_mu( @@ -2622,7 +2622,7 @@ vint16m8_t test_vxor_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vxor_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_mu( @@ -2631,7 +2631,7 @@ vint32mf2_t test_vxor_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m1_mu( @@ -2640,7 +2640,7 @@ vint32mf2_t test_vxor_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vxor_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m1_mu( @@ -2649,7 +2649,7 @@ vint32m1_t test_vxor_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m2_mu( @@ -2658,7 +2658,7 @@ vint32m1_t test_vxor_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, 
vint32m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vxor_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m2_mu( @@ -2667,7 +2667,7 @@ vint32m2_t test_vxor_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m4_mu( @@ -2676,7 +2676,7 @@ vint32m2_t test_vxor_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vxor_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m4_mu( @@ -2685,7 +2685,7 @@ vint32m4_t test_vxor_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m8_mu( @@ -2694,7 +2694,7 @@ vint32m4_t test_vxor_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vxor_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vxor_vx_i32m8_mu( @@ -2703,7 +2703,7 @@ vint32m8_t test_vxor_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vxor_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m1_mu( @@ -2712,7 +2712,7 @@ vint32m8_t test_vxor_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vxor_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m1_mu( @@ -2721,7 +2721,7 @@ vint64m1_t test_vxor_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m2_mu( @@ -2730,7 +2730,7 @@ vint64m1_t test_vxor_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vxor_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m2_mu( @@ -2739,7 +2739,7 @@ vint64m2_t test_vxor_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return 
vxor_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m4_mu( @@ -2748,7 +2748,7 @@ vint64m2_t test_vxor_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vxor_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m4_mu( @@ -2757,7 +2757,7 @@ vint64m4_t test_vxor_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m8_mu( @@ -2766,7 +2766,7 @@ vint64m4_t test_vxor_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vxor_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m8_mu( @@ -2775,7 +2775,7 @@ vint64m8_t test_vxor_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vxor_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_mu( @@ -2784,7 +2784,7 @@ vint64m8_t test_vxor_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8mf8_t test_vxor_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vxor_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_mu( @@ -2793,7 +2793,7 @@ vuint8mf8_t test_vxor_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_mu( @@ -2802,7 +2802,7 @@ vuint8mf8_t test_vxor_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vxor_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_mu( @@ -2811,7 +2811,7 @@ vuint8mf4_t test_vxor_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_mu( @@ -2820,7 +2820,7 @@ vuint8mf4_t test_vxor_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vxor_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vxor_vx_u8mf2_mu( @@ -2829,7 +2829,7 @@ vuint8mf2_t test_vxor_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m1_mu( @@ -2838,7 +2838,7 @@ vuint8mf2_t test_vxor_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vxor_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m1_mu( @@ -2847,7 +2847,7 @@ vuint8m1_t test_vxor_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m2_mu( @@ -2856,7 +2856,7 @@ vuint8m1_t test_vxor_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vxor_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m2_mu( @@ -2865,7 +2865,7 @@ vuint8m2_t test_vxor_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m2_mu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vxor_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m4_mu( @@ -2874,7 +2874,7 @@ vuint8m2_t test_vxor_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vxor_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m4_mu( @@ -2883,7 +2883,7 @@ vuint8m4_t test_vxor_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u8m8_mu( @@ -2892,7 +2892,7 @@ vuint8m4_t test_vxor_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vxor_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u8m8_mu( @@ -2901,7 +2901,7 @@ vuint8m8_t test_vxor_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vxor_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_mu( @@ -2910,7 +2910,7 @@ vuint8m8_t test_vxor_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4_mu(vbool64_t mask, 
vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vxor_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_mu( @@ -2919,7 +2919,7 @@ vuint16mf4_t test_vxor_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_mu( @@ -2928,7 +2928,7 @@ vuint16mf4_t test_vxor_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vxor_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_mu( @@ -2937,7 +2937,7 @@ vuint16mf2_t test_vxor_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m1_mu( @@ -2946,7 +2946,7 @@ vuint16mf2_t test_vxor_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vxor_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m1_mu( @@ -2955,7 
+2955,7 @@ vuint16m1_t test_vxor_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m2_mu( @@ -2964,7 +2964,7 @@ vuint16m1_t test_vxor_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vxor_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m2_mu( @@ -2973,7 +2973,7 @@ vuint16m2_t test_vxor_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m4_mu( @@ -2982,7 +2982,7 @@ vuint16m2_t test_vxor_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vxor_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m4_mu( @@ -2991,7 +2991,7 @@ vuint16m4_t test_vxor_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m4_mu(mask, maskedoff, 
op1, op2, vl); + return __riscv_vxor_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u16m8_mu( @@ -3000,7 +3000,7 @@ vuint16m4_t test_vxor_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vxor_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u16m8_mu( @@ -3009,7 +3009,7 @@ vuint16m8_t test_vxor_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vxor_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_mu( @@ -3018,7 +3018,7 @@ vuint16m8_t test_vxor_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vxor_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_mu( @@ -3027,7 +3027,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m1_mu( @@ -3036,7 +3036,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m1_t test_vxor_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vxor_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m1_mu( @@ -3045,7 +3045,7 @@ vuint32m1_t test_vxor_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m2_mu( @@ -3054,7 +3054,7 @@ vuint32m1_t test_vxor_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vxor_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m2_mu( @@ -3063,7 +3063,7 @@ vuint32m2_t test_vxor_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m4_mu( @@ -3072,7 +3072,7 @@ vuint32m2_t test_vxor_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vxor_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vxor_vx_u32m4_mu( @@ -3081,7 +3081,7 @@ vuint32m4_t test_vxor_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u32m8_mu( @@ -3090,7 +3090,7 @@ vuint32m4_t test_vxor_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vxor_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u32m8_mu( @@ -3099,7 +3099,7 @@ vuint32m8_t test_vxor_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vxor_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m1_mu( @@ -3108,7 +3108,7 @@ vuint32m8_t test_vxor_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vxor_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m1_mu( @@ -3117,7 +3117,7 @@ vuint64m1_t test_vxor_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return 
vxor_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m2_mu( @@ -3126,7 +3126,7 @@ vuint64m1_t test_vxor_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vxor_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m2_mu( @@ -3135,7 +3135,7 @@ vuint64m2_t test_vxor_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m4_mu( @@ -3144,7 +3144,7 @@ vuint64m2_t test_vxor_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vxor_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m4_mu( @@ -3153,7 +3153,7 @@ vuint64m4_t test_vxor_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_u64m8_mu( @@ -3162,7 +3162,7 @@ vuint64m4_t test_vxor_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint64m8_t test_vxor_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vxor_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_u64m8_mu( @@ -3171,6 +3171,6 @@ vuint64m8_t test_vxor_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vxor_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); + return __riscv_vxor_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext.c index b7b7d5e267ba..8073b5475a35 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vzext_vf2_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf2_u16mf4_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u16mf4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_tu( @@ -21,7 +21,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vzext_vf2_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf2_u16mf2_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u16mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_tu( @@ -30,7 +30,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vzext_vf2_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) 
{ - return vzext_vf2_u16m1_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_tu( @@ -39,7 +39,7 @@ vuint16m1_t test_vzext_vf2_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vzext_vf2_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf2_u16m2_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_tu( @@ -48,7 +48,7 @@ vuint16m2_t test_vzext_vf2_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vzext_vf2_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return vzext_vf2_u16m4_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_tu( @@ -57,7 +57,7 @@ vuint16m4_t test_vzext_vf2_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vzext_vf2_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { - return vzext_vf2_u16m8_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tu( @@ -66,7 +66,7 @@ vuint16m8_t test_vzext_vf2_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf4_u32mf2_tu(maskedoff, op1, vl); + return __riscv_vzext_vf4_u32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tu( @@ -75,7 +75,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t maskedoff, vuint8mf8_t op1, s // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf4_u32m1_tu(maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m1_tu(maskedoff, 
op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tu( @@ -84,7 +84,7 @@ vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t maskedoff, vuint8mf4_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf4_u32m2_tu(maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tu( @@ -93,7 +93,7 @@ vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t maskedoff, vuint8mf2_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf4_u32m4_tu(vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf4_u32m4_tu(maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tu( @@ -102,7 +102,7 @@ vuint32m4_t test_vzext_vf4_u32m4_tu(vuint32m4_t maskedoff, vuint8m1_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf4_u32m8_tu(vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return vzext_vf4_u32m8_tu(maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tu( @@ -111,7 +111,7 @@ vuint32m8_t test_vzext_vf4_u32m8_tu(vuint32m8_t maskedoff, vuint8m2_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf8_u64m1_tu(maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tu( @@ -120,7 +120,7 @@ vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t maskedoff, vuint8mf8_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf8_u64m2_tu(maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tu( @@ -129,7 +129,7 @@ vuint64m2_t 
test_vzext_vf8_u64m2_tu(vuint64m2_t maskedoff, vuint8mf4_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf8_u64m4_tu(maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tu( @@ -138,7 +138,7 @@ vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t maskedoff, vuint8mf2_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf8_u64m8_tu(maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_tu( @@ -147,7 +147,7 @@ vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t maskedoff, vuint8m1_t op1, size_ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf2_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vzext_vf2_u32mf2_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u32mf2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_tu( @@ -156,7 +156,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf2_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vzext_vf2_u32m1_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_tu( @@ -165,7 +165,7 @@ vuint32m1_t test_vzext_vf2_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf2_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return vzext_vf2_u32m2_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_tu( @@ -174,7 +174,7 @@ vuint32m2_t test_vzext_vf2_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, size // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint32m4_t test_vzext_vf2_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return vzext_vf2_u32m4_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_tu( @@ -183,7 +183,7 @@ vuint32m4_t test_vzext_vf2_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf2_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return vzext_vf2_u32m8_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tu( @@ -192,7 +192,7 @@ vuint32m8_t test_vzext_vf2_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vzext_vf4_u64m1_tu(maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tu( @@ -201,7 +201,7 @@ vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t maskedoff, vuint16mf4_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vzext_vf4_u64m2_tu(maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tu( @@ -210,7 +210,7 @@ vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t maskedoff, vuint16mf2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return vzext_vf4_u64m4_tu(maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tu( @@ -219,7 +219,7 @@ vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t maskedoff, vuint16m1_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t maskedoff, vuint16m2_t op1, 
size_t vl) { - return vzext_vf4_u64m8_tu(maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tu( @@ -228,7 +228,7 @@ vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t maskedoff, vuint16m2_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { - return vzext_vf2_u64m1_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m1_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_tu( @@ -237,7 +237,7 @@ vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf2_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return vzext_vf2_u64m2_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m2_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_tu( @@ -246,7 +246,7 @@ vuint64m2_t test_vzext_vf2_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf2_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { - return vzext_vf2_u64m4_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m4_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_tu( @@ -255,7 +255,7 @@ vuint64m4_t test_vzext_vf2_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf2_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { - return vzext_vf2_u64m8_tu(maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m8_tu(maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_tum( @@ -264,7 +264,7 @@ vuint64m8_t test_vzext_vf2_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, size // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vzext_vf2_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf2_u16mf4_tum(mask, maskedoff, op1, vl); + 
return __riscv_vzext_vf2_u16mf4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_tum( @@ -273,7 +273,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vzext_vf2_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf2_u16mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_tum( @@ -282,7 +282,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vzext_vf2_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf2_u16m1_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_tum( @@ -291,7 +291,7 @@ vuint16m1_t test_vzext_vf2_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vzext_vf2_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf2_u16m2_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_tum( @@ -300,7 +300,7 @@ vuint16m2_t test_vzext_vf2_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vzext_vf2_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return vzext_vf2_u16m4_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_tum( @@ -309,7 +309,7 @@ vuint16m4_t test_vzext_vf2_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vzext_vf2_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, 
size_t vl) { - return vzext_vf2_u16m8_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tum( @@ -318,7 +318,7 @@ vuint16m8_t test_vzext_vf2_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf4_u32mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tum( @@ -327,7 +327,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf4_u32m1_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tum( @@ -336,7 +336,7 @@ vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf4_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf4_u32m2_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tum( @@ -345,7 +345,7 @@ vuint32m2_t test_vzext_vf4_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf4_u32m4_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tum( @@ -354,7 +354,7 @@ vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t 
test_vzext_vf4_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return vzext_vf4_u32m8_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tum( @@ -363,7 +363,7 @@ vuint32m8_t test_vzext_vf4_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf8_u64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tum( @@ -372,7 +372,7 @@ vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf8_u64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tum( @@ -381,7 +381,7 @@ vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf8_u64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tum( @@ -390,7 +390,7 @@ vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf8_u64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_tum( @@ -399,7 +399,7 @@ vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t mask, vuint64m8_t 
maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf2_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vzext_vf2_u32mf2_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32mf2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_tum( @@ -408,7 +408,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, v // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf2_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vzext_vf2_u32m1_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_tum( @@ -417,7 +417,7 @@ vuint32m1_t test_vzext_vf2_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf2_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return vzext_vf2_u32m2_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_tum( @@ -426,7 +426,7 @@ vuint32m2_t test_vzext_vf2_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf2_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return vzext_vf2_u32m4_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_tum( @@ -435,7 +435,7 @@ vuint32m4_t test_vzext_vf2_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf2_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return vzext_vf2_u32m8_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tum( @@ -444,7 
+444,7 @@ vuint32m8_t test_vzext_vf2_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vzext_vf4_u64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m1_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tum( @@ -453,7 +453,7 @@ vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vzext_vf4_u64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tum( @@ -462,7 +462,7 @@ vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return vzext_vf4_u64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tum( @@ -471,7 +471,7 @@ vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return vzext_vf4_u64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tum( @@ -480,7 +480,7 @@ vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf2_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { - return vzext_vf2_u64m1_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m1_tum(mask, maskedoff, 
op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_tum( @@ -489,7 +489,7 @@ vuint64m1_t test_vzext_vf2_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf2_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return vzext_vf2_u64m2_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m2_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_tum( @@ -498,7 +498,7 @@ vuint64m2_t test_vzext_vf2_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf2_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { - return vzext_vf2_u64m4_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m4_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_tum( @@ -507,7 +507,7 @@ vuint64m4_t test_vzext_vf2_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf2_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { - return vzext_vf2_u64m8_tum(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m8_tum(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_tumu( @@ -516,7 +516,7 @@ vuint64m8_t test_vzext_vf2_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vzext_vf2_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf2_u16mf4_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16mf4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_tumu( @@ -525,7 +525,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vzext_vf2_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return 
vzext_vf2_u16mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_tumu( @@ -534,7 +534,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vzext_vf2_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf2_u16m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_tumu( @@ -543,7 +543,7 @@ vuint16m1_t test_vzext_vf2_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vzext_vf2_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf2_u16m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_tumu( @@ -552,7 +552,7 @@ vuint16m2_t test_vzext_vf2_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vzext_vf2_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return vzext_vf2_u16m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_tumu( @@ -561,7 +561,7 @@ vuint16m4_t test_vzext_vf2_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vzext_vf2_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { - return vzext_vf2_u16m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tumu( @@ -570,7 +570,7 @@ vuint16m8_t test_vzext_vf2_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vzext_vf4_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf4_u32mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32mf2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tumu( @@ -579,7 +579,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf4_u32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tumu( @@ -588,7 +588,7 @@ vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf4_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf4_u32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tumu( @@ -597,7 +597,7 @@ vuint32m2_t test_vzext_vf4_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf4_u32m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tumu( @@ -606,7 +606,7 @@ vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf4_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return vzext_vf4_u32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tumu( @@ -615,7 +615,7 @@ vuint32m8_t 
test_vzext_vf4_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf8_u64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tumu( @@ -624,7 +624,7 @@ vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf8_u64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tumu( @@ -633,7 +633,7 @@ vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf8_u64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tumu( @@ -642,7 +642,7 @@ vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf8_u64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_tumu( @@ -651,7 +651,7 @@ vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf2_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vzext_vf2_u32mf2_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32mf2_tumu(mask, maskedoff, 
op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_tumu( @@ -660,7 +660,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf2_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vzext_vf2_u32m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_tumu( @@ -669,7 +669,7 @@ vuint32m1_t test_vzext_vf2_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf2_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return vzext_vf2_u32m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_tumu( @@ -678,7 +678,7 @@ vuint32m2_t test_vzext_vf2_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf2_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return vzext_vf2_u32m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_tumu( @@ -687,7 +687,7 @@ vuint32m4_t test_vzext_vf2_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf2_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return vzext_vf2_u32m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tumu( @@ -696,7 +696,7 @@ vuint32m8_t test_vzext_vf2_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return 
vzext_vf4_u64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tumu( @@ -705,7 +705,7 @@ vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vzext_vf4_u64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tumu( @@ -714,7 +714,7 @@ vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return vzext_vf4_u64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tumu( @@ -723,7 +723,7 @@ vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return vzext_vf4_u64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tumu( @@ -732,7 +732,7 @@ vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { - return vzext_vf2_u64m1_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m1_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_tumu( @@ -741,7 +741,7 @@ vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t 
test_vzext_vf2_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return vzext_vf2_u64m2_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m2_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_tumu( @@ -750,7 +750,7 @@ vuint64m2_t test_vzext_vf2_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf2_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { - return vzext_vf2_u64m4_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m4_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_tumu( @@ -759,7 +759,7 @@ vuint64m4_t test_vzext_vf2_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vui // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf2_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { - return vzext_vf2_u64m8_tumu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m8_tumu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_mu( @@ -768,7 +768,7 @@ vuint64m8_t test_vzext_vf2_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuin // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vzext_vf2_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf2_u16mf4_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16mf4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_mu( @@ -777,7 +777,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vzext_vf2_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf2_u16mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_mu( @@ -786,7 +786,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2_mu(vbool32_t 
mask, vuint16mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vzext_vf2_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf2_u16m1_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_mu( @@ -795,7 +795,7 @@ vuint16m1_t test_vzext_vf2_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vzext_vf2_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf2_u16m2_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_mu( @@ -804,7 +804,7 @@ vuint16m2_t test_vzext_vf2_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vzext_vf2_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return vzext_vf2_u16m4_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_mu( @@ -813,7 +813,7 @@ vuint16m4_t test_vzext_vf2_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vzext_vf2_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { - return vzext_vf2_u16m8_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u16m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_mu( @@ -822,7 +822,7 @@ vuint16m8_t test_vzext_vf2_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf4_u32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_mu( @@ -831,7 +831,7 @@ 
vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf4_u32m1_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_mu( @@ -840,7 +840,7 @@ vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf4_u32m2_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_mu( @@ -849,7 +849,7 @@ vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf4_u32m4_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_mu( @@ -858,7 +858,7 @@ vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return vzext_vf4_u32m8_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_mu( @@ -867,7 +867,7 @@ vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf8_u64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: 
@test_vzext_vf8_u64m2_mu( @@ -876,7 +876,7 @@ vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf8_u64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_mu( @@ -885,7 +885,7 @@ vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf8_u64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_mu( @@ -894,7 +894,7 @@ vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf8_u64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf8_u64m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_mu( @@ -903,7 +903,7 @@ vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint8 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf2_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vzext_vf2_u32mf2_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32mf2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_mu( @@ -912,7 +912,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vu // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf2_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vzext_vf2_u32m1_mu(mask, maskedoff, op1, vl); + return 
__riscv_vzext_vf2_u32m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_mu( @@ -921,7 +921,7 @@ vuint32m1_t test_vzext_vf2_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf2_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return vzext_vf2_u32m2_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_mu( @@ -930,7 +930,7 @@ vuint32m2_t test_vzext_vf2_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf2_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return vzext_vf2_u32m4_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_mu( @@ -939,7 +939,7 @@ vuint32m4_t test_vzext_vf2_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf2_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return vzext_vf2_u32m8_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u32m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_mu( @@ -948,7 +948,7 @@ vuint32m8_t test_vzext_vf2_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf4_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vzext_vf4_u64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_mu( @@ -957,7 +957,7 @@ vuint64m1_t test_vzext_vf4_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return 
vzext_vf4_u64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_mu( @@ -966,7 +966,7 @@ vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return vzext_vf4_u64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_mu( @@ -975,7 +975,7 @@ vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return vzext_vf4_u64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf4_u64m8_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_mu( @@ -984,7 +984,7 @@ vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint1 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf2_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { - return vzext_vf2_u64m1_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m1_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_mu( @@ -993,7 +993,7 @@ vuint64m1_t test_vzext_vf2_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf2_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return vzext_vf2_u64m2_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m2_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_mu( @@ -1002,7 +1002,7 @@ vuint64m2_t test_vzext_vf2_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf2_u64m4_mu(vbool16_t mask, vuint64m4_t 
maskedoff, vuint32m2_t op1, size_t vl) { - return vzext_vf2_u64m4_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m4_mu(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_mu( @@ -1011,6 +1011,6 @@ vuint64m4_t test_vzext_vf2_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf2_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { - return vzext_vf2_u64m8_mu(mask, maskedoff, op1, vl); + return __riscv_vzext_vf2_u64m8_mu(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vget-index-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vget-index-out-of-range.c index c8c086686c42..bd8d38b791e9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vget-index-out-of-range.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vget-index-out-of-range.c @@ -6,336 +6,336 @@ #include vint8m1_t test_vget_v_index_not_constant(vint8m2_t src, int index) { - // expected-error@+1 {{argument to 'vget_v_i8m2_i8m1' must be a constant integer}} - return vget_v_i8m2_i8m1(src, index); + // expected-error@+1 {{argument to '__riscv_vget_v_i8m2_i8m1' must be a constant integer}} + return __riscv_vget_v_i8m2_i8m1(src, index); } vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_i8m2_i8m1(src, 2); + return __riscv_vget_v_i8m2_i8m1(src, 2); } vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_i8m4_i8m1(src, 4); + return __riscv_vget_v_i8m4_i8m1(src, 4); } vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_i8m4_i8m2(src, 2); + return __riscv_vget_v_i8m4_i8m2(src, 2); } vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src) { // expected-error@+1 
{{argument value 8 is outside the valid range [0, 7]}} - return vget_v_i8m8_i8m1(src, 8); + return __riscv_vget_v_i8m8_i8m1(src, 8); } vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_i8m8_i8m2(src, 4); + return __riscv_vget_v_i8m8_i8m2(src, 4); } vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_i8m8_i8m4(src, 2); + return __riscv_vget_v_i8m8_i8m4(src, 2); } vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_i16m2_i16m1(src, 2); + return __riscv_vget_v_i16m2_i16m1(src, 2); } vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_i16m4_i16m1(src, 4); + return __riscv_vget_v_i16m4_i16m1(src, 4); } vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_i16m4_i16m2(src, 2); + return __riscv_vget_v_i16m4_i16m2(src, 2); } vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vget_v_i16m8_i16m1(src, 8); + return __riscv_vget_v_i16m8_i16m1(src, 8); } vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_i16m8_i16m2(src, 4); + return __riscv_vget_v_i16m8_i16m2(src, 4); } vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_i16m8_i16m4(src, 2); + return __riscv_vget_v_i16m8_i16m4(src, 2); } vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_i32m2_i32m1(src, 2); + return 
__riscv_vget_v_i32m2_i32m1(src, 2); } vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_i32m4_i32m1(src, 4); + return __riscv_vget_v_i32m4_i32m1(src, 4); } vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_i32m4_i32m2(src, 2); + return __riscv_vget_v_i32m4_i32m2(src, 2); } vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vget_v_i32m8_i32m1(src, 8); + return __riscv_vget_v_i32m8_i32m1(src, 8); } vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_i32m8_i32m2(src, 4); + return __riscv_vget_v_i32m8_i32m2(src, 4); } vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_i32m8_i32m4(src, 2); + return __riscv_vget_v_i32m8_i32m4(src, 2); } vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_i64m2_i64m1(src, 2); + return __riscv_vget_v_i64m2_i64m1(src, 2); } vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_i64m4_i64m1(src, 4); + return __riscv_vget_v_i64m4_i64m1(src, 4); } vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_i64m4_i64m2(src, 2); + return __riscv_vget_v_i64m4_i64m2(src, 2); } vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vget_v_i64m8_i64m1(src, 8); + return __riscv_vget_v_i64m8_i64m1(src, 8); } vint64m2_t 
test_vget_v_i64m8_i64m2(vint64m8_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_i64m8_i64m2(src, 4); + return __riscv_vget_v_i64m8_i64m2(src, 4); } vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_i64m8_i64m4(src, 2); + return __riscv_vget_v_i64m8_i64m4(src, 2); } vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_u8m2_u8m1(src, 2); + return __riscv_vget_v_u8m2_u8m1(src, 2); } vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_u8m4_u8m1(src, 4); + return __riscv_vget_v_u8m4_u8m1(src, 4); } vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_u8m4_u8m2(src, 2); + return __riscv_vget_v_u8m4_u8m2(src, 2); } vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vget_v_u8m8_u8m1(src, 8); + return __riscv_vget_v_u8m8_u8m1(src, 8); } vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_u8m8_u8m2(src, 4); + return __riscv_vget_v_u8m8_u8m2(src, 4); } vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_u8m8_u8m4(src, 2); + return __riscv_vget_v_u8m8_u8m4(src, 2); } vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_u16m2_u16m1(src, 2); + return __riscv_vget_v_u16m2_u16m1(src, 2); } vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 
3]}} - return vget_v_u16m4_u16m1(src, 4); + return __riscv_vget_v_u16m4_u16m1(src, 4); } vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_u16m4_u16m2(src, 2); + return __riscv_vget_v_u16m4_u16m2(src, 2); } vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vget_v_u16m8_u16m1(src, 8); + return __riscv_vget_v_u16m8_u16m1(src, 8); } vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_u16m8_u16m2(src, 4); + return __riscv_vget_v_u16m8_u16m2(src, 4); } vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_u16m8_u16m4(src, 2); + return __riscv_vget_v_u16m8_u16m4(src, 2); } vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_u32m2_u32m1(src, 2); + return __riscv_vget_v_u32m2_u32m1(src, 2); } vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_u32m4_u32m1(src, 4); + return __riscv_vget_v_u32m4_u32m1(src, 4); } vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_u32m4_u32m2(src, 2); + return __riscv_vget_v_u32m4_u32m2(src, 2); } vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vget_v_u32m8_u32m1(src, 8); + return __riscv_vget_v_u32m8_u32m1(src, 8); } vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_u32m8_u32m2(src, 4); + return 
__riscv_vget_v_u32m8_u32m2(src, 4); } vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_u32m8_u32m4(src, 2); + return __riscv_vget_v_u32m8_u32m4(src, 2); } vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_u64m2_u64m1(src, 2); + return __riscv_vget_v_u64m2_u64m1(src, 2); } vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_u64m4_u64m1(src, 4); + return __riscv_vget_v_u64m4_u64m1(src, 4); } vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_u64m4_u64m2(src, 2); + return __riscv_vget_v_u64m4_u64m2(src, 2); } vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vget_v_u64m8_u64m1(src, 8); + return __riscv_vget_v_u64m8_u64m1(src, 8); } vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_u64m8_u64m2(src, 4); + return __riscv_vget_v_u64m8_u64m2(src, 4); } vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_u64m8_u64m4(src, 2); + return __riscv_vget_v_u64m8_u64m4(src, 2); } vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_f32m2_f32m1(src, 2); + return __riscv_vget_v_f32m2_f32m1(src, 2); } vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_f32m4_f32m1(src, 4); + return __riscv_vget_v_f32m4_f32m1(src, 4); } vfloat32m2_t 
test_vget_v_f32m4_f32m2(vfloat32m4_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_f32m4_f32m2(src, 2); + return __riscv_vget_v_f32m4_f32m2(src, 2); } vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vget_v_f32m8_f32m1(src, 8); + return __riscv_vget_v_f32m8_f32m1(src, 8); } vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_f32m8_f32m2(src, 4); + return __riscv_vget_v_f32m8_f32m2(src, 4); } vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_f32m8_f32m4(src, 2); + return __riscv_vget_v_f32m8_f32m4(src, 2); } vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_f64m2_f64m1(src, 2); + return __riscv_vget_v_f64m2_f64m1(src, 2); } vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_f64m4_f64m1(src, 4); + return __riscv_vget_v_f64m4_f64m1(src, 4); } vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_f64m4_f64m2(src, 2); + return __riscv_vget_v_f64m4_f64m2(src, 2); } vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vget_v_f64m8_f64m1(src, 8); + return __riscv_vget_v_f64m8_f64m1(src, 8); } vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_f64m8_f64m2(src, 4); + return __riscv_vget_v_f64m8_f64m2(src, 4); } vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src) { // 
expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_f64m8_f64m4(src, 2); + return __riscv_vget_v_f64m8_f64m4(src, 2); } vfloat16m1_t test_vget_v_f16m2_f16m1(vfloat16m2_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_f16m2_f16m1(src, 2); + return __riscv_vget_v_f16m2_f16m1(src, 2); } vfloat16m1_t test_vget_v_f16m4_f16m1(vfloat16m4_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_f16m4_f16m1(src, 4); + return __riscv_vget_v_f16m4_f16m1(src, 4); } vfloat16m1_t test_vget_v_f16m8_f16m1(vfloat16m8_t src) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vget_v_f16m8_f16m1(src, 8); + return __riscv_vget_v_f16m8_f16m1(src, 8); } vfloat16m2_t test_vget_v_f16m4_f16m2(vfloat16m4_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_f16m4_f16m2(src, 2); + return __riscv_vget_v_f16m4_f16m2(src, 2); } vfloat16m2_t test_vget_v_f16m8_f16m2(vfloat16m8_t src) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vget_v_f16m8_f16m2(src, 4); + return __riscv_vget_v_f16m8_f16m2(src, 4); } vfloat16m4_t test_vget_v_f16m8_f16m4(vfloat16m8_t src) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vget_v_f16m8_f16m4(src, 2); + return __riscv_vget_v_f16m8_f16m4(src, 2); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vget-vset-ice.cpp b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vget-vset-ice.cpp index b81aacc98c14..0a73376d67ea 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vget-vset-ice.cpp +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vget-vset-ice.cpp @@ -16,7 +16,7 @@ constexpr int foo() { return 1; } // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) { - return vget_v_i8m2_i8m1(src, foo()); + return 
__riscv_vget_v_i8m2_i8m1(src, foo()); } // CHECK-RV64-LABEL: @_Z21test_vset_v_i8m1_i8m2u14__rvv_int8m2_tu14__rvv_int8m1_t @@ -25,5 +25,5 @@ vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, vint8m1_t val) { - return vset_v_i8m1_i8m2(dest, foo(), val); + return __riscv_vset_v_i8m1_i8m2(dest, foo(), val); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64.c index ea0c5f8f0061..a8d1c451bf4a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh-eew64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmulh_vv_i64m1(op1, op2, vl); + return __riscv_vmulh_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1( @@ -22,7 +22,7 @@ vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m1(op1, op2, vl); + return __riscv_vmulh_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2( @@ -31,7 +31,7 @@ vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmulh_vv_i64m2(op1, op2, vl); + return __riscv_vmulh_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2( @@ -40,7 +40,7 @@ vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m2(op1, op2, vl); + return __riscv_vmulh_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4( @@ -49,7 +49,7 
@@ vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmulh_vv_i64m4(op1, op2, vl); + return __riscv_vmulh_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4( @@ -58,7 +58,7 @@ vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m4(op1, op2, vl); + return __riscv_vmulh_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8( @@ -67,7 +67,7 @@ vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh_vv_i64m8(op1, op2, vl); + return __riscv_vmulh_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8( @@ -76,7 +76,7 @@ vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m8(op1, op2, vl); + return __riscv_vmulh_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m( @@ -85,7 +85,7 @@ vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmulh_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m( @@ -94,7 +94,7 @@ vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m1_m(mask, op1, op2, vl); + return 
__riscv_vmulh_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m( @@ -103,7 +103,7 @@ vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmulh_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m( @@ -112,7 +112,7 @@ vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m( @@ -121,7 +121,7 @@ vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmulh_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m( @@ -130,7 +130,7 @@ vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m( @@ -139,7 +139,7 @@ vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m( @@ -148,7 
+148,7 @@ vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh.c index 57f8cf21961c..90fb6e6f6866 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulh.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmulh_vv_i8mf8(op1, op2, vl); + return __riscv_vmulh_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf8(op1, op2, vl); + return __riscv_vmulh_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmulh_vv_i8mf4(op1, op2, vl); + return __riscv_vmulh_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf4(op1, op2, vl); + return __riscv_vmulh_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmulh_vv_i8mf2(op1, op2, vl); + return __riscv_vmulh_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf2(op1, op2, vl); + return __riscv_vmulh_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmulh_vv_i8m1(op1, op2, vl); + return __riscv_vmulh_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m1(op1, op2, vl); + return __riscv_vmulh_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmulh_vv_i8m2(op1, op2, vl); + return __riscv_vmulh_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m2(op1, op2, vl); + return __riscv_vmulh_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t 
test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmulh_vv_i8m4(op1, op2, vl); + return __riscv_vmulh_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m4(op1, op2, vl); + return __riscv_vmulh_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmulh_vv_i8m8(op1, op2, vl); + return __riscv_vmulh_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m8(op1, op2, vl); + return __riscv_vmulh_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmulh_vv_i16mf4(op1, op2, vl); + return __riscv_vmulh_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf4(op1, op2, vl); + return __riscv_vmulh_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmulh_vv_i16mf2(op1, op2, vl); + return __riscv_vmulh_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf2(op1, op2, vl); + return __riscv_vmulh_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmulh_vv_i16m1(op1, op2, vl); + return __riscv_vmulh_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m1(op1, op2, vl); + return __riscv_vmulh_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmulh_vv_i16m2(op1, op2, vl); + return __riscv_vmulh_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m2(op1, op2, vl); + return __riscv_vmulh_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmulh_vv_i16m4(op1, op2, vl); + return __riscv_vmulh_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m4(op1, op2, vl); + return __riscv_vmulh_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmulh_vv_i16m8(op1, op2, vl); + return __riscv_vmulh_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m8(op1, op2, vl); + return __riscv_vmulh_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmulh_vv_i32mf2(op1, op2, vl); + return __riscv_vmulh_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32mf2(op1, op2, vl); + return __riscv_vmulh_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t 
test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmulh_vv_i32m1(op1, op2, vl); + return __riscv_vmulh_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m1(op1, op2, vl); + return __riscv_vmulh_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmulh_vv_i32m2(op1, op2, vl); + return __riscv_vmulh_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m2(op1, op2, vl); + return __riscv_vmulh_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmulh_vv_i32m4(op1, op2, vl); + return __riscv_vmulh_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m4(op1, op2, vl); + return __riscv_vmulh_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8( @@ -318,7 
+318,7 @@ vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmulh_vv_i32m8(op1, op2, vl); + return __riscv_vmulh_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m8(op1, op2, vl); + return __riscv_vmulh_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m( @@ -336,7 +336,7 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vmulh_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m( @@ -345,7 +345,7 @@ vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m( @@ -354,7 +354,7 @@ vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmulh_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m( @@ -363,7 +363,7 @@ vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, 
int8_t op2, size_t vl) { - return vmulh_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m( @@ -372,7 +372,7 @@ vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmulh_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m( @@ -381,7 +381,7 @@ vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m( @@ -390,7 +390,7 @@ vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmulh_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m( @@ -399,7 +399,7 @@ vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m( @@ -408,7 +408,7 @@ vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmulh_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8m2_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m( @@ -417,7 +417,7 @@ vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m( @@ -426,7 +426,7 @@ vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmulh_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m( @@ -435,7 +435,7 @@ vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m( @@ -444,7 +444,7 @@ vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmulh_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m( @@ -453,7 +453,7 @@ vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return vmulh_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m( @@ -462,7 +462,7 @@ vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmulh_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m( @@ -471,7 +471,7 @@ vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m( @@ -480,7 +480,7 @@ vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmulh_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m( @@ -489,7 +489,7 @@ vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16mf2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m( @@ -498,7 +498,7 @@ vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmulh_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m( @@ -507,7 +507,7 @@ vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, 
vint16m1_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m( @@ -516,7 +516,7 @@ vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmulh_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m( @@ -525,7 +525,7 @@ vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m( @@ -534,7 +534,7 @@ vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmulh_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m( @@ -543,7 +543,7 @@ vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m( @@ -552,7 +552,7 @@ vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmulh_vv_i16m8_m(mask, op1, op2, vl); + return 
__riscv_vmulh_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m( @@ -561,7 +561,7 @@ vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return vmulh_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m( @@ -570,7 +570,7 @@ vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmulh_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m( @@ -579,7 +579,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m( @@ -588,7 +588,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmulh_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m( @@ -597,7 +597,7 @@ vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m( @@ 
-606,7 +606,7 @@ vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmulh_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m( @@ -615,7 +615,7 @@ vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m( @@ -624,7 +624,7 @@ vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmulh_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m( @@ -633,7 +633,7 @@ vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m( @@ -642,7 +642,7 @@ vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmulh_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m( @@ -651,5 +651,5 @@ vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return vmulh_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vmulh_vx_i32m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64.c index d2ccee89c42b..4beb926f2c25 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu-eew64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhsu_vv_i64m1(op1, op2, vl); + return __riscv_vmulhsu_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1( @@ -22,7 +22,7 @@ vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m1(op1, op2, vl); + return __riscv_vmulhsu_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2( @@ -31,7 +31,7 @@ vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhsu_vv_i64m2(op1, op2, vl); + return __riscv_vmulhsu_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2( @@ -40,7 +40,7 @@ vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m2(op1, op2, vl); + return __riscv_vmulhsu_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4( @@ -49,7 +49,7 @@ vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhsu_vv_i64m4(op1, op2, vl); + return __riscv_vmulhsu_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4( @@ -58,7 +58,7 @@ vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m4(op1, op2, vl); + return __riscv_vmulhsu_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8( @@ -67,7 +67,7 @@ vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhsu_vv_i64m8(op1, op2, vl); + return __riscv_vmulhsu_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8( @@ -76,7 +76,7 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m8(op1, op2, vl); + return __riscv_vmulhsu_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m( @@ -85,7 +85,7 @@ vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhsu_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m( @@ -94,7 +94,7 @@ vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m1_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m( @@ -103,7 +103,7 @@ vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhsu_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m( @@ -112,7 +112,7 @@ vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m( @@ -121,7 +121,7 @@ vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhsu_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m( @@ -130,7 +130,7 @@ vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m( @@ -139,7 +139,7 @@ vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhsu_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m( @@ -148,5 +148,5 
@@ vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhsu_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu.c index 8300d5ae1121..57b3ea328059 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhsu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu_vv_i8mf8(op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8( @@ -21,7 +21,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf8(op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4( @@ -30,7 +30,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu_vv_i8mf4(op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4( @@ -39,7 +39,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf4(op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2( @@ -48,7 +48,7 @@ vint8mf4_t 
test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu_vv_i8mf2(op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2( @@ -57,7 +57,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf2(op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1( @@ -66,7 +66,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu_vv_i8m1(op1, op2, vl); + return __riscv_vmulhsu_vv_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1( @@ -75,7 +75,7 @@ vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m1(op1, op2, vl); + return __riscv_vmulhsu_vx_i8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2( @@ -84,7 +84,7 @@ vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu_vv_i8m2(op1, op2, vl); + return __riscv_vmulhsu_vv_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2( @@ -93,7 +93,7 @@ vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m2(op1, op2, vl); + return __riscv_vmulhsu_vx_i8m2(op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmulhsu_vv_i8m4( @@ -102,7 +102,7 @@ vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu_vv_i8m4(op1, op2, vl); + return __riscv_vmulhsu_vv_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4( @@ -111,7 +111,7 @@ vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m4(op1, op2, vl); + return __riscv_vmulhsu_vx_i8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8( @@ -120,7 +120,7 @@ vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhsu_vv_i8m8(op1, op2, vl); + return __riscv_vmulhsu_vv_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8( @@ -129,7 +129,7 @@ vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m8(op1, op2, vl); + return __riscv_vmulhsu_vx_i8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4( @@ -138,7 +138,7 @@ vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu_vv_i16mf4(op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4( @@ -147,7 +147,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf4(op1, op2, vl); + return 
__riscv_vmulhsu_vx_i16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2( @@ -156,7 +156,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu_vv_i16mf2(op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2( @@ -165,7 +165,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf2(op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1( @@ -174,7 +174,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhsu_vv_i16m1(op1, op2, vl); + return __riscv_vmulhsu_vv_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1( @@ -183,7 +183,7 @@ vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m1(op1, op2, vl); + return __riscv_vmulhsu_vx_i16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2( @@ -192,7 +192,7 @@ vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu_vv_i16m2(op1, op2, vl); + return __riscv_vmulhsu_vv_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2( @@ -201,7 +201,7 @@ vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m2(op1, op2, vl); + return __riscv_vmulhsu_vx_i16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4( @@ -210,7 +210,7 @@ vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu_vv_i16m4(op1, op2, vl); + return __riscv_vmulhsu_vv_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4( @@ -219,7 +219,7 @@ vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m4(op1, op2, vl); + return __riscv_vmulhsu_vx_i16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8( @@ -228,7 +228,7 @@ vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhsu_vv_i16m8(op1, op2, vl); + return __riscv_vmulhsu_vv_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8( @@ -237,7 +237,7 @@ vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m8(op1, op2, vl); + return __riscv_vmulhsu_vx_i16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2( @@ -246,7 +246,7 @@ vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhsu_vv_i32mf2(op1, op2, vl); + return __riscv_vmulhsu_vv_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2( @@ -255,7 +255,7 @@ vint32mf2_t 
test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32mf2(op1, op2, vl); + return __riscv_vmulhsu_vx_i32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1( @@ -264,7 +264,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhsu_vv_i32m1(op1, op2, vl); + return __riscv_vmulhsu_vv_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1( @@ -273,7 +273,7 @@ vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m1(op1, op2, vl); + return __riscv_vmulhsu_vx_i32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2( @@ -282,7 +282,7 @@ vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhsu_vv_i32m2(op1, op2, vl); + return __riscv_vmulhsu_vv_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2( @@ -291,7 +291,7 @@ vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m2(op1, op2, vl); + return __riscv_vmulhsu_vx_i32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4( @@ -300,7 +300,7 @@ vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhsu_vv_i32m4(op1, op2, vl); + return 
__riscv_vmulhsu_vv_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4( @@ -309,7 +309,7 @@ vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m4(op1, op2, vl); + return __riscv_vmulhsu_vx_i32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8( @@ -318,7 +318,7 @@ vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhsu_vv_i32m8(op1, op2, vl); + return __riscv_vmulhsu_vv_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8( @@ -327,7 +327,7 @@ vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m8(op1, op2, vl); + return __riscv_vmulhsu_vx_i32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m( @@ -336,7 +336,7 @@ vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu_vv_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m( @@ -345,7 +345,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m( @@ -354,7 +354,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t 
op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu_vv_i8mf4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m( @@ -363,7 +363,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m( @@ -372,7 +372,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu_vv_i8mf2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m( @@ -381,7 +381,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8mf2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m( @@ -390,7 +390,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu_vv_i8m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m( @@ -399,7 +399,7 @@ vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m( @@ -408,7 +408,7 @@ vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu_vv_i8m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m( @@ -417,7 +417,7 @@ vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m( @@ -426,7 +426,7 @@ vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu_vv_i8m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m( @@ -435,7 +435,7 @@ vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m( @@ -444,7 +444,7 @@ vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return 
vmulhsu_vv_i8m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m( @@ -453,7 +453,7 @@ vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, s // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu_vx_i8m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m( @@ -462,7 +462,7 @@ vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu_vv_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m( @@ -471,7 +471,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m( @@ -480,7 +480,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu_vv_i16mf2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m( @@ -489,7 +489,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16mf2_m(mask, op1, op2, vl); + return 
__riscv_vmulhsu_vx_i16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m( @@ -498,7 +498,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhsu_vv_i16m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m( @@ -507,7 +507,7 @@ vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m( @@ -516,7 +516,7 @@ vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu_vv_i16m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m( @@ -525,7 +525,7 @@ vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m( @@ -534,7 +534,7 @@ vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu_vv_i16m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m4_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m( @@ -543,7 +543,7 @@ vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m( @@ -552,7 +552,7 @@ vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhsu_vv_i16m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m( @@ -561,7 +561,7 @@ vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu_vx_i16m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m( @@ -570,7 +570,7 @@ vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhsu_vv_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m( @@ -579,7 +579,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2 // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32mf2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m( @@ -588,7 
+588,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhsu_vv_i32m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m( @@ -597,7 +597,7 @@ vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m1_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m( @@ -606,7 +606,7 @@ vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhsu_vv_i32m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m( @@ -615,7 +615,7 @@ vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m2_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m( @@ -624,7 +624,7 @@ vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhsu_vv_i32m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m( @@ -633,7 +633,7 @@ vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, 
vint32m4_t op1, vuint32m4_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m4_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m( @@ -642,7 +642,7 @@ vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhsu_vv_i32m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vv_i32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m( @@ -651,5 +651,5 @@ vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhsu_vx_i32m8_m(mask, op1, op2, vl); + return __riscv_vmulhsu_vx_i32m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64.c index 45002407fa93..4bf94dc1dfde 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu-eew64.c @@ -13,7 +13,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu_vv_u64m1(op1, op2, vl); + return __riscv_vmulhu_vv_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1( @@ -22,7 +22,7 @@ vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m1(op1, op2, vl); + return __riscv_vmulhu_vx_u64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2( @@ -31,7 
+31,7 @@ vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu_vv_u64m2(op1, op2, vl); + return __riscv_vmulhu_vv_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2( @@ -40,7 +40,7 @@ vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m2(op1, op2, vl); + return __riscv_vmulhu_vx_u64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4( @@ -49,7 +49,7 @@ vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu_vv_u64m4(op1, op2, vl); + return __riscv_vmulhu_vv_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4( @@ -58,7 +58,7 @@ vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m4(op1, op2, vl); + return __riscv_vmulhu_vx_u64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8( @@ -67,7 +67,7 @@ vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu_vv_u64m8(op1, op2, vl); + return __riscv_vmulhu_vv_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8( @@ -76,7 +76,7 @@ vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m8(op1, op2, vl); + return 
__riscv_vmulhu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m( @@ -85,7 +85,7 @@ vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu_vv_u64m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m( @@ -94,7 +94,7 @@ vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m( @@ -103,7 +103,7 @@ vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu_vv_u64m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m( @@ -112,7 +112,7 @@ vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m( @@ -121,7 +121,7 @@ vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu_vv_u64m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmulhu_vx_u64m4_m( @@ -130,7 +130,7 @@ vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m( @@ -139,7 +139,7 @@ vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu_vv_u64m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m( @@ -148,5 +148,5 @@ vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhu_vx_u64m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu.c index 2d782901c41e..f84e9170e681 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vmulhu.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu_vv_u8mf8(op1, op2, vl); + return __riscv_vmulhu_vv_u8mf8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8( @@ -21,7 +21,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf8(op1, op2, vl); + return __riscv_vmulhu_vx_u8mf8(op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4( @@ -30,7 +30,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu_vv_u8mf4(op1, op2, vl); + return __riscv_vmulhu_vv_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4( @@ -39,7 +39,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf4(op1, op2, vl); + return __riscv_vmulhu_vx_u8mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2( @@ -48,7 +48,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu_vv_u8mf2(op1, op2, vl); + return __riscv_vmulhu_vv_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2( @@ -57,7 +57,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf2(op1, op2, vl); + return __riscv_vmulhu_vx_u8mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1( @@ -66,7 +66,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu_vv_u8m1(op1, op2, vl); + return __riscv_vmulhu_vv_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1( @@ -75,7 +75,7 @@ vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m1(op1, 
op2, vl); + return __riscv_vmulhu_vx_u8m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2( @@ -84,7 +84,7 @@ vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu_vv_u8m2(op1, op2, vl); + return __riscv_vmulhu_vv_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2( @@ -93,7 +93,7 @@ vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m2(op1, op2, vl); + return __riscv_vmulhu_vx_u8m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4( @@ -102,7 +102,7 @@ vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu_vv_u8m4(op1, op2, vl); + return __riscv_vmulhu_vv_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4( @@ -111,7 +111,7 @@ vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m4(op1, op2, vl); + return __riscv_vmulhu_vx_u8m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8( @@ -120,7 +120,7 @@ vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhu_vv_u8m8(op1, op2, vl); + return __riscv_vmulhu_vv_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8( @@ -129,7 +129,7 @@ vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return 
vmulhu_vx_u8m8(op1, op2, vl); + return __riscv_vmulhu_vx_u8m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4( @@ -138,7 +138,7 @@ vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu_vv_u16mf4(op1, op2, vl); + return __riscv_vmulhu_vv_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4( @@ -147,7 +147,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf4(op1, op2, vl); + return __riscv_vmulhu_vx_u16mf4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2( @@ -156,7 +156,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhu_vv_u16mf2(op1, op2, vl); + return __riscv_vmulhu_vv_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2( @@ -165,7 +165,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf2(op1, op2, vl); + return __riscv_vmulhu_vx_u16mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1( @@ -174,7 +174,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu_vv_u16m1(op1, op2, vl); + return __riscv_vmulhu_vv_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1( @@ -183,7 +183,7 @@ vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m1(op1, op2, vl); + return __riscv_vmulhu_vx_u16m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2( @@ -192,7 +192,7 @@ vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu_vv_u16m2(op1, op2, vl); + return __riscv_vmulhu_vv_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2( @@ -201,7 +201,7 @@ vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m2(op1, op2, vl); + return __riscv_vmulhu_vx_u16m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4( @@ -210,7 +210,7 @@ vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhu_vv_u16m4(op1, op2, vl); + return __riscv_vmulhu_vv_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4( @@ -219,7 +219,7 @@ vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m4(op1, op2, vl); + return __riscv_vmulhu_vx_u16m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8( @@ -228,7 +228,7 @@ vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu_vv_u16m8(op1, op2, vl); + return __riscv_vmulhu_vv_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8( @@ -237,7 +237,7 @@ 
vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m8(op1, op2, vl); + return __riscv_vmulhu_vx_u16m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2( @@ -246,7 +246,7 @@ vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu_vv_u32mf2(op1, op2, vl); + return __riscv_vmulhu_vv_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2( @@ -255,7 +255,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32mf2(op1, op2, vl); + return __riscv_vmulhu_vx_u32mf2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1( @@ -264,7 +264,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu_vv_u32m1(op1, op2, vl); + return __riscv_vmulhu_vv_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1( @@ -273,7 +273,7 @@ vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m1(op1, op2, vl); + return __riscv_vmulhu_vx_u32m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2( @@ -282,7 +282,7 @@ vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu_vv_u32m2(op1, op2, vl); + return 
__riscv_vmulhu_vv_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2( @@ -291,7 +291,7 @@ vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m2(op1, op2, vl); + return __riscv_vmulhu_vx_u32m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4( @@ -300,7 +300,7 @@ vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu_vv_u32m4(op1, op2, vl); + return __riscv_vmulhu_vv_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4( @@ -309,7 +309,7 @@ vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m4(op1, op2, vl); + return __riscv_vmulhu_vx_u32m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8( @@ -318,7 +318,7 @@ vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu_vv_u32m8(op1, op2, vl); + return __riscv_vmulhu_vv_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8( @@ -327,7 +327,7 @@ vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m8(op1, op2, vl); + return __riscv_vmulhu_vx_u32m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m( @@ -336,7 +336,7 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t 
mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu_vv_u8mf8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m( @@ -345,7 +345,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m( @@ -354,7 +354,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu_vv_u8mf4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m( @@ -363,7 +363,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8mf4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m( @@ -372,7 +372,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu_vv_u8mf2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m( @@ -381,7 +381,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return 
vmulhu_vx_u8mf2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u8mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m( @@ -390,7 +390,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu_vv_u8m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m( @@ -399,7 +399,7 @@ vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u8m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m( @@ -408,7 +408,7 @@ vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu_vv_u8m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m( @@ -417,7 +417,7 @@ vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u8m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m( @@ -426,7 +426,7 @@ vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu_vv_u8m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8m4_m(mask, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m( @@ -435,7 +435,7 @@ vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u8m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m( @@ -444,7 +444,7 @@ vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhu_vv_u8m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m( @@ -453,7 +453,7 @@ vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhu_vx_u8m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u8m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m( @@ -462,7 +462,7 @@ vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, siz // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu_vv_u16mf4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m( @@ -471,7 +471,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u16mf4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m( @@ -480,7 +480,7 @@ vuint16mf4_t 
test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhu_vv_u16mf2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m( @@ -489,7 +489,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16mf2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u16mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m( @@ -498,7 +498,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu_vv_u16m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m( @@ -507,7 +507,7 @@ vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u16m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m( @@ -516,7 +516,7 @@ vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu_vv_u16m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m( @@ -525,7 +525,7 @@ vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, 
vuint16m2_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u16m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m( @@ -534,7 +534,7 @@ vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhu_vv_u16m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m( @@ -543,7 +543,7 @@ vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u16m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m( @@ -552,7 +552,7 @@ vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu_vv_u16m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m( @@ -561,7 +561,7 @@ vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu_vx_u16m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u16m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m( @@ -570,7 +570,7 @@ vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu_vv_u32mf2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m( @@ -579,7 +579,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32mf2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u32mf2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m( @@ -588,7 +588,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu_vv_u32m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m( @@ -597,7 +597,7 @@ vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m1_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u32m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m( @@ -606,7 +606,7 @@ vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu_vv_u32m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m( @@ -615,7 +615,7 @@ vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, 
uint32_t op2, size_t vl) { - return vmulhu_vx_u32m2_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u32m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m( @@ -624,7 +624,7 @@ vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2 // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu_vv_u32m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m( @@ -633,7 +633,7 @@ vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m4_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u32m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m( @@ -642,7 +642,7 @@ vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu_vv_u32m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vv_u32m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m( @@ -651,5 +651,5 @@ vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t o // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu_vx_u32m8_m(mask, op1, op2, vl); + return __riscv_vmulhu_vx_u32m8_m(mask, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vset-index-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vset-index-out-of-range.c index 0bb86e195874..fad13d9ba23c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vset-index-out-of-range.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vset-index-out-of-range.c @@ -6,336 +6,336 @@ #include vint8m1_t test_vset_v_index_not_constant(vint8m2_t dest, vint8m1_t val, int index) { - // expected-error@+1 {{argument to 'vset_v_i8m1_i8m2' must be a constant integer}} - return vset_v_i8m1_i8m2(dest, index, val); + // expected-error@+1 {{argument to '__riscv_vset_v_i8m1_i8m2' must be a constant integer}} + return __riscv_vset_v_i8m1_i8m2(dest, index, val); } vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, vint8m1_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_i8m1_i8m2(dest, 2, val); + return __riscv_vset_v_i8m1_i8m2(dest, 2, val); } vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, vint8m1_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_i8m1_i8m4(dest, 4, val); + return __riscv_vset_v_i8m1_i8m4(dest, 4, val); } vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, vint8m2_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_i8m2_i8m4(dest, 2, val); + return __riscv_vset_v_i8m2_i8m4(dest, 2, val); } vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, vint8m1_t val) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vset_v_i8m1_i8m8(dest, 8, val); + return __riscv_vset_v_i8m1_i8m8(dest, 8, val); } vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, vint8m2_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_i8m2_i8m8(dest, 4, val); + return __riscv_vset_v_i8m2_i8m8(dest, 4, val); } vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, vint8m4_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_i8m4_i8m8(dest, 2, val); + return __riscv_vset_v_i8m4_i8m8(dest, 2, val); } vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, vint16m1_t val) { // expected-error@+1 {{argument value 2 is outside the 
valid range [0, 1]}} - return vset_v_i16m1_i16m2(dest, 2, val); + return __riscv_vset_v_i16m1_i16m2(dest, 2, val); } vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, vint16m1_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_i16m1_i16m4(dest, 4, val); + return __riscv_vset_v_i16m1_i16m4(dest, 4, val); } vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, vint16m2_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_i16m2_i16m4(dest, 2, val); + return __riscv_vset_v_i16m2_i16m4(dest, 2, val); } vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, vint16m1_t val) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vset_v_i16m1_i16m8(dest, 8, val); + return __riscv_vset_v_i16m1_i16m8(dest, 8, val); } vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, vint16m2_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_i16m2_i16m8(dest, 4, val); + return __riscv_vset_v_i16m2_i16m8(dest, 4, val); } vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, vint16m4_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_i16m4_i16m8(dest, 2, val); + return __riscv_vset_v_i16m4_i16m8(dest, 2, val); } vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, vint32m1_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_i32m1_i32m2(dest, 2, val); + return __riscv_vset_v_i32m1_i32m2(dest, 2, val); } vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, vint32m1_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_i32m1_i32m4(dest, 4, val); + return __riscv_vset_v_i32m1_i32m4(dest, 4, val); } vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, vint32m2_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_i32m2_i32m4(dest, 2, val); 
+ return __riscv_vset_v_i32m2_i32m4(dest, 2, val); } vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, vint32m1_t val) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vset_v_i32m1_i32m8(dest, 8, val); + return __riscv_vset_v_i32m1_i32m8(dest, 8, val); } vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, vint32m2_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_i32m2_i32m8(dest, 4, val); + return __riscv_vset_v_i32m2_i32m8(dest, 4, val); } vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, vint32m4_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_i32m4_i32m8(dest, 2, val); + return __riscv_vset_v_i32m4_i32m8(dest, 2, val); } vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, vint64m1_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_i64m1_i64m2(dest, 2, val); + return __riscv_vset_v_i64m1_i64m2(dest, 2, val); } vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, vint64m1_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_i64m1_i64m4(dest, 4, val); + return __riscv_vset_v_i64m1_i64m4(dest, 4, val); } vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, vint64m2_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_i64m2_i64m4(dest, 2, val); + return __riscv_vset_v_i64m2_i64m4(dest, 2, val); } vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, vint64m1_t val) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vset_v_i64m1_i64m8(dest, 8, val); + return __riscv_vset_v_i64m1_i64m8(dest, 8, val); } vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, vint64m2_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_i64m2_i64m8(dest, 4, val); + return __riscv_vset_v_i64m2_i64m8(dest, 4, val); } vint64m8_t 
test_vset_v_i64m4_i64m8(vint64m8_t dest, vint64m4_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_i64m4_i64m8(dest, 2, val); + return __riscv_vset_v_i64m4_i64m8(dest, 2, val); } vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, vuint8m1_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_u8m1_u8m2(dest, 2, val); + return __riscv_vset_v_u8m1_u8m2(dest, 2, val); } vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, vuint8m1_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_u8m1_u8m4(dest, 4, val); + return __riscv_vset_v_u8m1_u8m4(dest, 4, val); } vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, vuint8m2_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_u8m2_u8m4(dest, 2, val); + return __riscv_vset_v_u8m2_u8m4(dest, 2, val); } vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, vuint8m1_t val) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vset_v_u8m1_u8m8(dest, 8, val); + return __riscv_vset_v_u8m1_u8m8(dest, 8, val); } vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, vuint8m2_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_u8m2_u8m8(dest, 4, val); + return __riscv_vset_v_u8m2_u8m8(dest, 4, val); } vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, vuint8m4_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_u8m4_u8m8(dest, 2, val); + return __riscv_vset_v_u8m4_u8m8(dest, 2, val); } vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, vuint16m1_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_u16m1_u16m2(dest, 2, val); + return __riscv_vset_v_u16m1_u16m2(dest, 2, val); } vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, vuint16m1_t val) { // expected-error@+1 {{argument 
value 4 is outside the valid range [0, 3]}} - return vset_v_u16m1_u16m4(dest, 4, val); + return __riscv_vset_v_u16m1_u16m4(dest, 4, val); } vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, vuint16m2_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_u16m2_u16m4(dest, 2, val); + return __riscv_vset_v_u16m2_u16m4(dest, 2, val); } vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, vuint16m1_t val) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vset_v_u16m1_u16m8(dest, 8, val); + return __riscv_vset_v_u16m1_u16m8(dest, 8, val); } vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, vuint16m2_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_u16m2_u16m8(dest, 4, val); + return __riscv_vset_v_u16m2_u16m8(dest, 4, val); } vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, vuint16m4_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_u16m4_u16m8(dest, 2, val); + return __riscv_vset_v_u16m4_u16m8(dest, 2, val); } vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, vuint32m1_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_u32m1_u32m2(dest, 2, val); + return __riscv_vset_v_u32m1_u32m2(dest, 2, val); } vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, vuint32m1_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_u32m1_u32m4(dest, 4, val); + return __riscv_vset_v_u32m1_u32m4(dest, 4, val); } vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, vuint32m2_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_u32m2_u32m4(dest, 2, val); + return __riscv_vset_v_u32m2_u32m4(dest, 2, val); } vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, vuint32m1_t val) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 
7]}} - return vset_v_u32m1_u32m8(dest, 8, val); + return __riscv_vset_v_u32m1_u32m8(dest, 8, val); } vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, vuint32m2_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_u32m2_u32m8(dest, 4, val); + return __riscv_vset_v_u32m2_u32m8(dest, 4, val); } vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, vuint32m4_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_u32m4_u32m8(dest, 2, val); + return __riscv_vset_v_u32m4_u32m8(dest, 2, val); } vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, vuint64m1_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_u64m1_u64m2(dest, 2, val); + return __riscv_vset_v_u64m1_u64m2(dest, 2, val); } vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, vuint64m1_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_u64m1_u64m4(dest, 4, val); + return __riscv_vset_v_u64m1_u64m4(dest, 4, val); } vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, vuint64m2_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_u64m2_u64m4(dest, 2, val); + return __riscv_vset_v_u64m2_u64m4(dest, 2, val); } vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, vuint64m1_t val) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vset_v_u64m1_u64m8(dest, 8, val); + return __riscv_vset_v_u64m1_u64m8(dest, 8, val); } vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, vuint64m2_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_u64m2_u64m8(dest, 4, val); + return __riscv_vset_v_u64m2_u64m8(dest, 4, val); } vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, vuint64m4_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_u64m4_u64m8(dest, 
2, val); + return __riscv_vset_v_u64m4_u64m8(dest, 2, val); } vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, vfloat32m1_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_f32m1_f32m2(dest, 2, val); + return __riscv_vset_v_f32m1_f32m2(dest, 2, val); } vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, vfloat32m1_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_f32m1_f32m4(dest, 4, val); + return __riscv_vset_v_f32m1_f32m4(dest, 4, val); } vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, vfloat32m2_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_f32m2_f32m4(dest, 2, val); + return __riscv_vset_v_f32m2_f32m4(dest, 2, val); } vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, vfloat32m1_t val) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vset_v_f32m1_f32m8(dest, 8, val); + return __riscv_vset_v_f32m1_f32m8(dest, 8, val); } vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, vfloat32m2_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_f32m2_f32m8(dest, 4, val); + return __riscv_vset_v_f32m2_f32m8(dest, 4, val); } vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, vfloat32m4_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_f32m4_f32m8(dest, 2, val); + return __riscv_vset_v_f32m4_f32m8(dest, 2, val); } vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, vfloat64m1_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_f64m1_f64m2(dest, 2, val); + return __riscv_vset_v_f64m1_f64m2(dest, 2, val); } vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, vfloat64m1_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_f64m1_f64m4(dest, 4, val); + 
return __riscv_vset_v_f64m1_f64m4(dest, 4, val); } vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, vfloat64m2_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_f64m2_f64m4(dest, 2, val); + return __riscv_vset_v_f64m2_f64m4(dest, 2, val); } vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, vfloat64m1_t val) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vset_v_f64m1_f64m8(dest, 8, val); + return __riscv_vset_v_f64m1_f64m8(dest, 8, val); } vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, vfloat64m2_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_f64m2_f64m8(dest, 4, val); + return __riscv_vset_v_f64m2_f64m8(dest, 4, val); } vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, vfloat64m4_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_f64m4_f64m8(dest, 2, val); + return __riscv_vset_v_f64m4_f64m8(dest, 2, val); } vfloat16m2_t test_vset_v_f16m1_f16m2(vfloat16m2_t dest, vfloat16m1_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_f16m1_f16m2(dest, 2, val); + return __riscv_vset_v_f16m1_f16m2(dest, 2, val); } vfloat16m4_t test_vset_v_f16m1_f16m4(vfloat16m4_t dest, vfloat16m1_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_f16m1_f16m4(dest, 4, val); + return __riscv_vset_v_f16m1_f16m4(dest, 4, val); } vfloat16m4_t test_vset_v_f16m2_f16m4(vfloat16m4_t dest, vfloat16m2_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_f16m2_f16m4(dest, 2, val); + return __riscv_vset_v_f16m2_f16m4(dest, 2, val); } vfloat16m8_t test_vset_v_f16m1_f16m8(vfloat16m8_t dest, vfloat16m1_t val) { // expected-error@+1 {{argument value 8 is outside the valid range [0, 7]}} - return vset_v_f16m1_f16m8(dest, 8, val); + return 
__riscv_vset_v_f16m1_f16m8(dest, 8, val); } vfloat16m8_t test_vset_v_f16m2_f16m8(vfloat16m8_t dest, vfloat16m2_t val) { // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} - return vset_v_f16m2_f16m8(dest, 4, val); + return __riscv_vset_v_f16m2_f16m8(dest, 4, val); } vfloat16m8_t test_vset_v_f16m4_f16m8(vfloat16m8_t dest, vfloat16m4_t val) { // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} - return vset_v_f16m4_f16m8(dest, 2, val); + return __riscv_vset_v_f16m4_f16m8(dest, 2, val); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64.c index 803ca6f72d2b..03a96ade8f87 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vsmul-eew64.c @@ -12,7 +12,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsmul_vv_i64m1(op1, op2, vl); + return __riscv_vsmul_vv_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1( @@ -21,7 +21,7 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m1(op1, op2, vl); + return __riscv_vsmul_vx_i64m1(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2( @@ -30,7 +30,7 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul_vv_i64m2(op1, op2, vl); + return __riscv_vsmul_vv_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2( @@ -39,7 +39,7 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t 
vl) { - return vsmul_vx_i64m2(op1, op2, vl); + return __riscv_vsmul_vx_i64m2(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4( @@ -48,7 +48,7 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul_vv_i64m4(op1, op2, vl); + return __riscv_vsmul_vv_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4( @@ -57,7 +57,7 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m4(op1, op2, vl); + return __riscv_vsmul_vx_i64m4(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8( @@ -66,7 +66,7 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsmul_vv_i64m8(op1, op2, vl); + return __riscv_vsmul_vv_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8( @@ -75,7 +75,7 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m8(op1, op2, vl); + return __riscv_vsmul_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( @@ -84,7 +84,7 @@ vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsmul_vv_i64m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( @@ -93,7 +93,7 @@ vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m1_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( @@ -102,7 +102,7 @@ vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul_vv_i64m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( @@ -111,7 +111,7 @@ vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m2_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( @@ -120,7 +120,7 @@ vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul_vv_i64m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( @@ -129,7 +129,7 @@ vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m4_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( @@ -138,7 +138,7 @@ vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, si // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return 
vsmul_vv_i64m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m( @@ -147,5 +147,5 @@ vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return vsmul_vx_i64m8_m(mask, op1, op2, vl); + return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, vl); } diff --git a/clang/test/Sema/uninit-variables-riscv-vector.c b/clang/test/Sema/uninit-variables-riscv-vector.c index 0f91b0d75710..91af7514656b 100644 --- a/clang/test/Sema/uninit-variables-riscv-vector.c +++ b/clang/test/Sema/uninit-variables-riscv-vector.c @@ -4,11 +4,11 @@ void test1(int *input, long vl) { __rvv_int32m1_t x, y, z, w, X; // expected-note {{variable 'x' is declared here}} expected-note {{variable 'y' is declared here}} expected-note {{variable 'w' is declared here}} expected-note {{variable 'z' is declared here}} - x = vxor_vv_i32m1(x,x, vl); // expected-warning {{variable 'x' is uninitialized when used here}} - y = vxor_vv_i32m1(y,y, vl); // expected-warning {{variable 'y' is uninitialized when used here}} - z = vxor_vv_i32m1(z,z, vl); // expected-warning {{variable 'z' is uninitialized when used here}} - w = vxor_vv_i32m1(w,w, vl); // expected-warning {{variable 'w' is uninitialized when used here}} - X = vle32_v_i32m1(&input[0], vl); - X = vxor_vv_i32m1(X,X, vl); // no-warning + x = __riscv_vxor_vv_i32m1(x,x, vl); // expected-warning {{variable 'x' is uninitialized when used here}} + y = __riscv_vxor_vv_i32m1(y,y, vl); // expected-warning {{variable 'y' is uninitialized when used here}} + z = __riscv_vxor_vv_i32m1(z,z, vl); // expected-warning {{variable 'z' is uninitialized when used here}} + w = __riscv_vxor_vv_i32m1(w,w, vl); // expected-warning {{variable 'w' is uninitialized when used here}} + X = __riscv_vle32_v_i32m1(&input[0], vl); + X = __riscv_vxor_vv_i32m1(X,X, vl); // 
no-warning } -- GitLab